build: Add basic Hydrun CI config

This commit is contained in:
Felicitas Pojtinger
2021-12-20 01:41:09 +01:00
parent 63fb7df81c
commit 07f7af284d
17 changed files with 137 additions and 3740 deletions

75
.github/workflows/hydrun.yaml vendored Normal file
View File

@@ -0,0 +1,75 @@
name: hydrun CI

on:
  push:
  pull_request:
  schedule:
    # Weekly run (Sunday 00:00 UTC) to catch regressions against moving
    # dependencies such as the builder image and @latest actions.
    - cron: "0 0 * * 0"

jobs:
  build-linux:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          # id:    artifact name
          # src:   build context directory
          # os:    builder container image
          # flags: extra hydrun flags (--privileged is needed by the
          #        gccgo cross-build inside the container)
          # cmd:   command hydrun runs inside the image
          # dst:   glob of build outputs to upload
          - id: stfs
            src: .
            os: ghcr.io/pojntfx/bagccgop-base-sid
            flags: -e '--privileged'
            cmd: ./Hydrunfile
            dst: out/*
    steps:
      - name: Maximize build space
        # Free disk space on the hosted runner before the multi-arch build.
        run: |
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /usr/local/lib/android
          sudo rm -rf /opt/ghc
      - name: Checkout
        # NOTE(review): checkout@v2 / upload-artifact@v2 are deprecated
        # upstream — consider bumping in a follow-up.
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up hydrun
        run: |
          curl -L -o /tmp/hydrun "https://github.com/pojntfx/hydrun/releases/latest/download/hydrun.linux-$(uname -m)"
          sudo install /tmp/hydrun /usr/local/bin
      - name: Build with hydrun
        working-directory: ${{ matrix.target.src }}
        run: hydrun -o ${{ matrix.target.os }} ${{ matrix.target.flags }} "${{ matrix.target.cmd }}"
      - name: Fix permissions for output
        # Files created inside the container are root-owned; reclaim them
        # so the upload step can read them.
        run: sudo chown -R $USER .
      - name: Upload output
        uses: actions/upload-artifact@v2
        with:
          name: ${{ matrix.target.id }}
          path: ${{ matrix.target.dst }}

  publish-linux:
    runs-on: ubuntu-latest
    needs: build-linux
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Download output
        uses: actions/download-artifact@v2
        with:
          path: /tmp/out
      - name: Publish pre-release to GitHub releases
        # Every push to main refreshes the rolling "unstable" pre-release.
        if: ${{ github.ref == 'refs/heads/main' }}
        uses: marvinpinto/action-automatic-releases@latest
        with:
          repo_token: "${{ secrets.GITHUB_TOKEN }}"
          automatic_release_tag: unstable
          prerelease: true
          files: |
            /tmp/out/*/*
      - name: Publish release to GitHub releases
        # Tag pushes (v*) create a proper, non-prerelease release.
        if: startsWith(github.ref, 'refs/tags/v')
        uses: marvinpinto/action-automatic-releases@latest
        with:
          repo_token: "${{ secrets.GITHUB_TOKEN }}"
          prerelease: false
          files: |
            /tmp/out/*/*

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
out

16
Hydrunfile Executable file
View File

@@ -0,0 +1,16 @@
#!/bin/bash
set -e

# Hydrunfile: executed by hydrun inside the builder container to produce
# the statically linked stfs binaries for all supported Linux targets.

# Install native dependencies. Use apt-get rather than apt: the apt(8)
# CLI warns that its interface is not stable for use in scripts.
apt-get update
apt-get install -y curl

# Install bagccgop (gccgo-based multi-arch build helper)
curl -L -o /tmp/bagccgop "https://github.com/pojntfx/bagccgop/releases/latest/download/bagccgop.linux-$(uname -m)"
install /tmp/bagccgop /usr/local/bin

# Build; linux/alpha and linux/mipsle are excluded, -j1 limits parallelism,
# libsqlite3-dev is installed per-target, and the output lands in out/.
GOFLAGS='-gccgoflags=-static' bagccgop -x '(linux/alpha|linux/mipsle)' -j1 -b stfs -r 'make depend' -m 'libsqlite3-dev' -p 'make build/stfs DST=$DST' -d out

exit 0

View File

@@ -1,6 +1,43 @@
generate: # Public variables
sql-migrate up -env="production" -config configs/sql-migrate/metadata.yaml DESTDIR ?=
go generate ./... PREFIX ?= /usr/local
OUTPUT_DIR ?= out
DST ?=
# Private variables
obj = stfs
all: $(addprefix build/,$(obj))
# Build
build: $(addprefix build/,$(obj))
$(addprefix build/,$(obj)):
ifdef DST
go build -o $(DST) ./cmd/$(subst build/,,$@)
else
go build -o $(OUTPUT_DIR)/$(subst build/,,$@) ./cmd/$(subst build/,,$@)
endif
# Install
install: $(addprefix install/,$(obj))
$(addprefix install/,$(obj)):
install -D -m 0755 $(OUTPUT_DIR)/$(subst install/,,$@) $(DESTDIR)$(PREFIX)/bin/$(subst install/,,$@)
# Uninstall
uninstall: $(addprefix uninstall/,$(obj))
$(addprefix uninstall/,$(obj)):
rm $(DESTDIR)$(PREFIX)/bin/$(subst uninstall/,,$@)
# Run
$(addprefix run/,$(obj)):
$(subst run/,,$@) $(ARGS)
# Clean
clean: clean:
rm -rf internal/db rm -rf out internal/db
# Dependencies
depend:
go install github.com/rubenv/sql-migrate/sql-migrate@latest
go install github.com/volatiletech/sqlboiler/v4@latest
sql-migrate up -env="production" -config configs/sql-migrate/metadata.yaml
go generate ./...

View File

@@ -2,6 +2,10 @@
Simple Tape File System (STFS), a file system for tapes and tar files.
[![hydrun CI](https://github.com/pojntfx/stfs/actions/workflows/hydrun.yaml/badge.svg)](https://github.com/pojntfx/stfs/actions/workflows/hydrun.yaml)
[![Matrix](https://img.shields.io/matrix/stfs:matrix.org)](https://matrix.to/#/#stfs:matrix.org?via=matrix.org)
[![Binary Downloads](https://img.shields.io/github/downloads/pojntfx/stfs/total?label=binary%20downloads)](https://github.com/pojntfx/stfs/releases)
## Overview
🚧 This project is a work-in-progress! Instructions will be added as soon as it is usable. 🚧

View File

@@ -1,115 +0,0 @@
package metadata
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"strings"
)
func bindata_read(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
return buf.Bytes(), nil
}
var _db_sqlite_migrations_metadata_1637447083_sql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x56\xcd\x72\xdb\x36\x10\x3e\xd7\x4f\xb1\x93\x4b\xec\xa9\xa8\x73\xa7\x99\x1e\xdc\xb8\x71\x3d\x13\x3b\x19\xd9\xaa\x73\x34\x48\x2c\x49\x54\x20\x16\x5d\x80\x92\x99\xa7\xef\x2c\x40\x2a\xb2\x14\xc9\xe9\xf4\x24\x0a\xd8\xfd\xf6\xff\x5b\x14\x05\xfc\xdc\x99\x86\x55\x44\x58\xfa\xb3\x8a\x51\xbe\xa2\x2a\x2d\x42\x8b\x4a\x23\x07\x38\x3f\x03\x00\x28\x0a\x58\x60\x45\xac\x81\x6a\x88\xad\x09\xe3\x3d\x90\x83\xd8\x8a\x8e\xc7\x24\xc8\x59\xca\xb8\x88\x0d\x32\x38\x8a\xe0\x7a\x6b\x67\xdf\x43\x41\xb0\x2a\x44\xe8\xbd\x16\xb3\x13\xe0\x69\x7c\xd1\x58\x39\xda\xb8\xd7\x0c\xfd\x6e\xa9\x5a\xed\xa3\x99\x8c\x96\x75\x93\x64\x99\xc4\x7e\x04\xe5\x47\xbc\x3d\xc4\xdf\xfa\xfb\x8a\xa1\x9b\x1a\x02\xc6\x59\x52\x1f\xc1\x5a\x15\xa0\x44\x74\xa0\xd1\x62\x44\x9d\x72\xa1\x3c\xce\xa0\xec\x23\x3c\xed\x65\xe2\x09\x94\xd3\x3b\xa7\xc9\xde\x13\x28\x46\x08\xd1\x58\x2b\xae\x32\x5a\x5c\x2b\x57\xe5\x54\x4e\xb0\x47\x7d\x7a\x18\x3c\xd6\x56\x35\x60\x42\x2e\xc2\xe0\x51\x60\x46\xff\xd0\x45\x1e\xe6\x5b\xe1\x16\xe1\x2b\x32\xc1\x5a\xd9\x1e\x45\x45\xf5\x91\x3a\x15\x4d\xa5\xac\x1d\xc0\x33\x75\x24\xe6\x22\x01\x9a\xd8\x22\x27\xfc\x05\x36\x40\xf9\xf3\xca\xf0\x04\xa6\xd1\xa3\xd3\xc6\x35\x53\xfd\x3d\x63\x40\x57\x25\xf3\x0a\x22\x2b\x63\xe5\x36\x58\x15\x5a\xc9\xfa\x9d\xea\x30\xbb\x12\xb7\x4e\x1f\x0b\x4b\x64\x05\xa7\x36\x16\x73\x10\xe9\xc6\xc9\x71\xc4\xe7\xb8\xd5\x00\xcf\xa6\x53\x3c\xc0\x0a\x87\x6f\x49\x51\xdc\x60\xcc\xd2\x54\x83\x35\x6e\x05\xe7\x6b\x65\x8d\x86\x7a\x0c\xe4\xa3\x9c\x8d\xdf\xf7\x43\x27\x22\x17\xb9\x19\x8c\x5b\x1d\x9a\xd9\x42\x7f\xa4\x46\x92\x95\x1d\x0b\xe6\x2b\x4a\x64\xe5\x10\x31\x24\x89\xf1\xe4\x48\x54\x9f\x91\x3b\x13\x82\x21\x97\x1a\xa1\x23\x8d\x50\x9a\x98\x55\xd3\xbf\xa3\xaa\xcb\x80\x0c\x37\x57\x12\x0e\x6d\x1c\xe6\x2a\xf4\xe6\x44\x67\x5c\x33\xf5\xfe\x40\xa5\x39\xa5\x92\x8c\x4c\x59\xdb\x31\x73\x22\x1f\xd9\xca\xa1\x4e\x73\x42\xe7\x26\x8f\xea\x07\xe2\x4e\x45\xe9\xc2\xde\x05\x8f\x95\xa9\x0d\xea\x34\x5d\x0e\x1e\xd9\x44\xe4\x79
\xfa\xf9\x33\xb7\x32\x53\xef\x74\x80\x5b\xd2\x0f\xa6\xc3\x09\x2b\x52\xc2\x72\xa8\x18\x43\x84\x80\x15\x39\x9d\xb2\x6b\x1a\x47\x8c\x79\x2a\x2e\xab\x0a\x43\x10\xbd\x74\xf5\xbe\x55\xae\xc1\xf4\xb7\x36\x68\x75\x98\x26\x64\xdb\x40\x04\x7d\x78\xa1\x46\xbc\xa3\x35\x83\xec\xef\xb0\x1b\x87\x0a\xf0\xf9\xf2\x8b\x08\x5e\xdf\x2d\xe7\x7b\x48\xa1\x2f\x8b\xd1\x37\xc6\x40\xb6\x8f\x86\xdc\x09\x98\xad\xfe\x2d\x69\x53\x9b\x4a\x89\x3c\xc4\x29\xf0\x8e\xb4\x7c\x43\xa2\xb9\x83\xfc\x66\xb7\x93\x34\x9c\x33\xfe\xd3\x1b\xc9\xc3\x38\xd0\xdf\x7c\x84\xd0\x7b\x4f\x1c\x73\xdf\xab\xa4\x74\x02\x35\x87\xff\x1f\x51\xab\xa4\x74\x02\xf5\x56\xfd\x4d\x0c\x1a\xd7\xa6\x42\x70\x7d\x57\x22\xef\x8f\xea\xfb\x56\xf1\x34\xaa\x89\xeb\x2f\x46\x6a\x5c\x77\x49\xf9\x68\x3b\xdf\x1a\xf7\x3f\xb0\x93\xf2\xf1\x51\xbe\xfc\x92\xd7\x64\x48\x34\x0a\x9d\xf2\x32\x00\x92\x07\x7c\x8e\xe8\x34\xea\x89\x83\x33\xf7\x1f\xf4\x98\x0c\x5b\xa1\xb1\x36\x0e\xf5\x24\x03\xa1\xa5\xde\x6a\x68\xd5\x1a\x85\xd1\xc2\xb4\xd6\x6a\xb2\x96\x36\x42\xa7\x35\x71\xf7\xeb\x88\xf1\xd3\x5f\x7f\xdc\x5d\x7d\x5a\xcc\x57\x38\x6c\xa6\x65\x56\x14\xf0\xd8\x22\x23\xe4\x3b\xf1\x2e\x50\x87\x69\x44\x83\x57\x55\x22\x2c\x65\x2d\xf4\xde\x23\x57\x2a\xe0\x2c\xcd\xc4\x88\x01\x9d\x1a\x26\x1c\x09\xba\x22\x17\xd5\xb8\x32\xdf\xfe\xf6\x56\x0a\xca\xaa\x8a\x92\x49\x9c\x37\xf3\x19\xbc\xb9\xfe\xf4\xf1\xf2\xee\x7a\xee\x57\xcd\x7c\x8d\x2c\xe4\xf6\xe6\xe2\xc5\xc6\x59\xe1\x90\x2c\xe4\xa5\x33\x46\x58\x4a\x2f\xb8\x02\x3b\x1f\x07\x58\x3e\x7c\x28\x7e\x81\x10\xd9\xb8\xe6\x20\x4f\x8f\x47\xf8\xc0\x04\x90\xad\x25\x94\x21\x59\x9f\x32\xa8\x91\xcd\x1a\x35\xd4\x4c\x9d\x78\x3d\xc1\x50\xea\xd3\x3c\xee\x92\x82\x11\x25\xaa\x55\xda\x5c\x15\xea\xbc\xbb\xd6\xb9\x9b\x17\xbb\x45\xf3\xea\x79\x82\xff\x3e\xa3\x8d\xb3\x3b\xb1\x58\x18\x6b\x96\x0e\xc7\x0a\x46\xc5\x63\x43\x1c\x90\x8d\x3c\x4e\xa4\x4c\x18\xa1\x1c\x60\x91\x85\xee\xc4\x90\x92\xd6\x2a\x31\xc4\x02\xeb\x9a\x38\x42\xd3\xcb\x64\xab\xb8\x63\x60\x9b\xeb\x7b\x23\x01\xc8\x45\x86\x00\x6b\x4a\xe4\xb4\xd8\x19\x95\x9e\xfa\x80\x5c\x51\x51\xe7\xad\x51\x2e\xa6\x35\x16\xb6
\x61\x98\xc4\xc6\x9e\x42\x30\xf2\xbc\x94\x21\x49\x2f\xa7\x48\x52\xaf\x1c\xe4\xd2\xa5\xa7\xcb\x7e\x10\x37\xf5\x6e\xcc\x2f\x39\x1d\x36\xaf\x96\x70\x4b\xe7\x22\x69\xa2\x70\xe6\x98\x44\xc3\x21\x4e\xb0\xe7\x63\x23\x12\x8f\xef\xba\xe5\xfd\xc3\xe5\x22\xd5\x7f\x26\xdc\x73\x31\xc1\x54\xca\xa7\x07\x32\xd5\x80\xae\xa2\xf4\x4a\x49\x91\x8c\x86\xcf\x03\x4e\xf1\x8c\xbd\x3a\x39\xbe\x37\xef\x67\x17\xef\xce\x76\x5f\xe0\x57\xb4\x71\x67\x9a\xc9\xbf\x7c\x81\xbf\xfb\x37\x00\x00\xff\xff\x7a\x41\x6c\x96\xa6\x0b\x00\x00")
// db_sqlite_migrations_metadata_1637447083_sql returns the decompressed
// contents of the embedded migration SQL file, registered in _bindata
// under its original repository-relative path.
func db_sqlite_migrations_metadata_1637447083_sql() ([]byte, error) {
	return bindata_read(
		_db_sqlite_migrations_metadata_1637447083_sql,
		"../../db/sqlite/migrations/metadata/1637447083.sql",
	)
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	// Normalize Windows-style separators before the table lookup.
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	loader, ok := _bindata[cannonicalName]
	if !ok {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	return loader()
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
	// Collect every registered asset path; map iteration order is
	// intentionally unspecified, matching the generated original.
	assetNames := make([]string, 0, len(_bindata))
	for assetName := range _bindata {
		assetNames = append(assetNames, assetName)
	}
	return assetNames
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() ([]byte, error){
"../../db/sqlite/migrations/metadata/1637447083.sql": db_sqlite_migrations_metadata_1637447083_sql,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		// Normalize Windows-style separators, then walk the tree one
		// path segment at a time.
		cannonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(cannonicalName, "/")
		for _, p := range pathList {
			node = node.Children[p]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	// A non-nil Func means name resolved to a file, not a directory, so
	// listing its children is an error by design.
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	rv := make([]string, 0, len(node.Children))
	for name := range node.Children {
		rv = append(rv, name)
	}
	return rv, nil
}
// _bintree_t is a node in the embedded-asset directory tree: interior
// (directory) nodes have a nil Func and populated Children; leaf (file)
// nodes carry the loader Func for that asset.
type _bintree_t struct {
	Func     func() ([]byte, error)
	Children map[string]*_bintree_t
}

// _bintree is the root of the asset tree. The "../.." segments mirror
// the relative paths the assets were registered under at generation time.
var _bintree = &_bintree_t{nil, map[string]*_bintree_t{
	"..": &_bintree_t{nil, map[string]*_bintree_t{
		"..": &_bintree_t{nil, map[string]*_bintree_t{
			"db": &_bintree_t{nil, map[string]*_bintree_t{
				"sqlite": &_bintree_t{nil, map[string]*_bintree_t{
					"migrations": &_bintree_t{nil, map[string]*_bintree_t{
						"metadata": &_bintree_t{nil, map[string]*_bintree_t{
							"1637447083.sql": &_bintree_t{db_sqlite_migrations_metadata_1637447083_sql, map[string]*_bintree_t{}},
						}},
					}},
				}},
			}},
		}},
	}},
}}

View File

@@ -1,119 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"database/sql"
"flag"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/spf13/viper"
"github.com/volatiletech/sqlboiler/v4/boil"
)
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")
const outputDirDepth = 5
var (
dbMain tester
)
type tester interface {
setup() error
conn() (*sql.DB, error)
teardown() error
}
// TestMain drives the database test harness: it requires that a
// driver-specific tester was registered in dbMain, loads configuration,
// enables SQL debug output when -test.sqldebug is set, and runs the
// suite between setup and teardown.
func TestMain(m *testing.M) {
	if dbMain == nil {
		fmt.Println("no dbMain tester interface was ready")
		os.Exit(-1)
	}

	rand.Seed(time.Now().UnixNano())

	flag.Parse()

	var err error

	// Load configuration
	err = initViper()
	if err != nil {
		fmt.Println("unable to load config file")
		os.Exit(-2)
	}

	// Set DebugMode so we can see generated sql statements
	boil.DebugMode = *flagDebugMode

	if err = dbMain.setup(); err != nil {
		fmt.Println("Unable to execute setup:", err)
		os.Exit(-4)
	}

	conn, err := dbMain.conn()
	if err != nil {
		// NOTE(review): on connection failure this only logs and then
		// continues with a nil conn — confirm upstream whether an
		// os.Exit is intended here (generated code; do not hand-fix).
		fmt.Println("failed to get connection:", err)
	}

	var code int
	boil.SetDB(conn)
	code = m.Run()

	if err = dbMain.teardown(); err != nil {
		fmt.Println("Unable to execute teardown:", err)
		os.Exit(-5)
	}

	os.Exit(code)
}
// initViper loads the sqlboiler test configuration. An explicit
// -test.config file takes precedence and must parse; otherwise a config
// named "sqlboiler" is searched for relative to the working directory
// (outputDirDepth levels up) and under XDG_CONFIG_HOME or ~/.config,
// with environment variables overlaid via AutomaticEnv.
func initViper() error {
	if flagConfigFile != nil && *flagConfigFile != "" {
		viper.SetConfigFile(*flagConfigFile)
		if err := viper.ReadInConfig(); err != nil {
			return err
		}
		return nil
	}

	var err error

	viper.SetConfigName("sqlboiler")

	configHome := os.Getenv("XDG_CONFIG_HOME")
	homePath := os.Getenv("HOME")
	wd, err := os.Getwd()
	if err != nil {
		// Fall back to a purely relative path when the working
		// directory cannot be determined.
		wd = strings.Repeat("../", outputDirDepth)
	} else {
		wd = wd + strings.Repeat("/..", outputDirDepth)
	}

	configPaths := []string{wd}
	if len(configHome) > 0 {
		configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
	} else {
		configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
	}

	for _, p := range configPaths {
		viper.AddConfigPath(p)
	}

	// Ignore errors here, fall back to defaults and validation to provide errs
	_ = viper.ReadInConfig()

	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()

	return nil
}

View File

@@ -1,33 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"github.com/volatiletech/sqlboiler/v4/drivers"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
// dialect configures SQLBoiler's query generation for this database:
// identifiers are quoted with '"' (0x22 on both sides) and the
// schema/TOP/OUTPUT/CASE-WHEN-EXISTS features are disabled.
// NOTE(review): generated by SQLBoiler — regenerate, don't hand-edit.
var dialect = drivers.Dialect{
	LQ: 0x22,
	RQ: 0x22,

	UseIndexPlaceholders:    false,
	UseLastInsertID:         false,
	UseSchema:               false,
	UseDefaultKeyword:       true,
	UseAutoColumns:          false,
	UseTopClause:            false,
	UseOutputClause:         false,
	UseCaseWhenExistsClause: false,
}
// NewQuery initializes a new Query using the passed in QueryMods
func NewQuery(mods ...qm.QueryMod) *queries.Query {
	q := &queries.Query{}
	// Bind the package-level dialect before applying the caller's mods.
	queries.SetDialect(q, &dialect)
	qm.Apply(q, mods...)

	return q
}

View File

@@ -1,52 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"regexp"
"github.com/volatiletech/sqlboiler/v4/boil"
)
var dbNameRand *rand.Rand
// MustTx is a test convenience: it panics when the transactor could not
// be created and otherwise passes it through unchanged.
func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor {
	if err != nil {
		panic(fmt.Sprintf("Cannot create a transactor: %s", err))
	}

	return transactor
}
// newFKeyDestroyer wraps reader so that every match of regex is removed
// from the stream as it is read.
func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader {
	d := &fKeyDestroyer{rgx: regex, reader: reader}
	return d
}
type fKeyDestroyer struct {
reader io.Reader
buf *bytes.Buffer
rgx *regexp.Regexp
}
func (f *fKeyDestroyer) Read(b []byte) (int, error) {
if f.buf == nil {
all, err := ioutil.ReadAll(f.reader)
if err != nil {
return 0, err
}
all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1)
all = f.rgx.ReplaceAll(all, []byte{})
f.buf = bytes.NewBuffer(all)
}
return f.buf.Read(b)
}

View File

@@ -1,139 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import "testing"
// This test suite runs each operation test in parallel.
// Example, if your database has 3 tables, the suite will run:
// table1, table2 and table3 Delete in parallel
// table1, table2 and table3 Insert in parallel, and so forth.
// It does NOT run each operation group in parallel.
// Separating the tests thusly grants avoidance of Postgres deadlocks.
func TestParent(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrations)
t.Run("Headers", testHeaders)
}
func TestDelete(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsDelete)
t.Run("Headers", testHeadersDelete)
}
func TestQueryDeleteAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsQueryDeleteAll)
t.Run("Headers", testHeadersQueryDeleteAll)
}
func TestSliceDeleteAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsSliceDeleteAll)
t.Run("Headers", testHeadersSliceDeleteAll)
}
func TestExists(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsExists)
t.Run("Headers", testHeadersExists)
}
func TestFind(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsFind)
t.Run("Headers", testHeadersFind)
}
func TestBind(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsBind)
t.Run("Headers", testHeadersBind)
}
func TestOne(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsOne)
t.Run("Headers", testHeadersOne)
}
func TestAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsAll)
t.Run("Headers", testHeadersAll)
}
func TestCount(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsCount)
t.Run("Headers", testHeadersCount)
}
func TestHooks(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsHooks)
t.Run("Headers", testHeadersHooks)
}
func TestInsert(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsInsert)
t.Run("GorpMigrations", testGorpMigrationsInsertWhitelist)
t.Run("Headers", testHeadersInsert)
t.Run("Headers", testHeadersInsertWhitelist)
}
// TestToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestToOne(t *testing.T) {}
// TestOneToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOne(t *testing.T) {}
// TestToMany tests cannot be run in parallel
// or deadlocks can occur.
func TestToMany(t *testing.T) {}
// TestToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneSet(t *testing.T) {}
// TestToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneRemove(t *testing.T) {}
// TestOneToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneSet(t *testing.T) {}
// TestOneToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneRemove(t *testing.T) {}
// TestToManyAdd tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyAdd(t *testing.T) {}
// TestToManySet tests cannot be run in parallel
// or deadlocks can occur.
func TestToManySet(t *testing.T) {}
// TestToManyRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyRemove(t *testing.T) {}
func TestReload(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsReload)
t.Run("Headers", testHeadersReload)
}
func TestReloadAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsReloadAll)
t.Run("Headers", testHeadersReloadAll)
}
func TestSelect(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsSelect)
t.Run("Headers", testHeadersSelect)
}
func TestUpdate(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsUpdate)
t.Run("Headers", testHeadersUpdate)
}
func TestSliceUpdateAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsSliceUpdateAll)
t.Run("Headers", testHeadersSliceUpdateAll)
}

View File

@@ -1,12 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
// TableNames holds the literal database table names so other code can
// reference them without duplicating string constants.
var TableNames = struct {
	GorpMigrations string
	Headers        string
}{
	GorpMigrations: "gorp_migrations",
	Headers:        "headers",
}

View File

@@ -1,52 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"strconv"
"github.com/friendsofgo/errors"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/strmangle"
)
// M type is for providing columns and column values to UpdateAll.
type M map[string]interface{}
// ErrSyncFail occurs during insert when the record could not be retrieved in
// order to populate default value information. This usually happens when LastInsertId
// fails or there was a primary key configuration that was not resolvable.
var ErrSyncFail = errors.New("models: failed to synchronize data after insert")
type insertCache struct {
query string
retQuery string
valueMapping []uint64
retMapping []uint64
}
type updateCache struct {
query string
valueMapping []uint64
}
// makeCacheKey builds the lookup key for the insert/update statement
// caches from the column kind, the column names, and — separated by a
// '.' marker — any columns that had non-zero default values.
func makeCacheKey(cols boil.Columns, nzDefaults []string) string {
	buf := strmangle.GetBuffer()

	buf.WriteString(strconv.Itoa(cols.Kind))
	for _, w := range cols.Cols {
		buf.WriteString(w)
	}
	if len(nzDefaults) != 0 {
		buf.WriteByte('.')
	}
	for _, nz := range nzDefaults {
		buf.WriteString(nz)
	}

	str := buf.String()
	// Return the pooled buffer only after its contents were copied out.
	strmangle.PutBuffer(buf)

	return str
}

View File

@@ -1,812 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"context"
"database/sql"
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/friendsofgo/errors"
"github.com/volatiletech/null/v8"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
"github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
"github.com/volatiletech/strmangle"
)
// GorpMigration is an object representing the database table.
type GorpMigration struct {
ID string `boil:"id" json:"id" toml:"id" yaml:"id"`
AppliedAt null.Time `boil:"applied_at" json:"applied_at,omitempty" toml:"applied_at" yaml:"applied_at,omitempty"`
R *gorpMigrationR `boil:"-" json:"-" toml:"-" yaml:"-"`
L gorpMigrationL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
var GorpMigrationColumns = struct {
ID string
AppliedAt string
}{
ID: "id",
AppliedAt: "applied_at",
}
var GorpMigrationTableColumns = struct {
ID string
AppliedAt string
}{
ID: "gorp_migrations.id",
AppliedAt: "gorp_migrations.applied_at",
}
// Generated where
type whereHelperstring struct{ field string }
func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
func (w whereHelperstring) IN(slice []string) qm.QueryMod {
values := make([]interface{}, 0, len(slice))
for _, value := range slice {
values = append(values, value)
}
return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
}
func (w whereHelperstring) NIN(slice []string) qm.QueryMod {
values := make([]interface{}, 0, len(slice))
for _, value := range slice {
values = append(values, value)
}
return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
}
type whereHelpernull_Time struct{ field string }
func (w whereHelpernull_Time) EQ(x null.Time) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_Time) NEQ(x null.Time) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, true, x)
}
func (w whereHelpernull_Time) LT(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_Time) LTE(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_Time) GT(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_Time) GTE(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GTE, x)
}
func (w whereHelpernull_Time) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_Time) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
var GorpMigrationWhere = struct {
ID whereHelperstring
AppliedAt whereHelpernull_Time
}{
ID: whereHelperstring{field: "\"gorp_migrations\".\"id\""},
AppliedAt: whereHelpernull_Time{field: "\"gorp_migrations\".\"applied_at\""},
}
// GorpMigrationRels is where relationship names are stored.
var GorpMigrationRels = struct {
}{}
// gorpMigrationR is where relationships are stored.
type gorpMigrationR struct {
}
// NewStruct creates a new relationship struct
func (*gorpMigrationR) NewStruct() *gorpMigrationR {
return &gorpMigrationR{}
}
// gorpMigrationL is where Load methods for each relationship are stored.
type gorpMigrationL struct{}
var (
gorpMigrationAllColumns = []string{"id", "applied_at"}
gorpMigrationColumnsWithoutDefault = []string{"id", "applied_at"}
gorpMigrationColumnsWithDefault = []string{}
gorpMigrationPrimaryKeyColumns = []string{"id"}
)
type (
// GorpMigrationSlice is an alias for a slice of pointers to GorpMigration.
// This should almost always be used instead of []GorpMigration.
GorpMigrationSlice []*GorpMigration
// GorpMigrationHook is the signature for custom GorpMigration hook methods
GorpMigrationHook func(context.Context, boil.ContextExecutor, *GorpMigration) error
gorpMigrationQuery struct {
*queries.Query
}
)
// Cache for insert, update and upsert
var (
gorpMigrationType = reflect.TypeOf(&GorpMigration{})
gorpMigrationMapping = queries.MakeStructMapping(gorpMigrationType)
gorpMigrationPrimaryKeyMapping, _ = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, gorpMigrationPrimaryKeyColumns)
gorpMigrationInsertCacheMut sync.RWMutex
gorpMigrationInsertCache = make(map[string]insertCache)
gorpMigrationUpdateCacheMut sync.RWMutex
gorpMigrationUpdateCache = make(map[string]updateCache)
gorpMigrationUpsertCacheMut sync.RWMutex
gorpMigrationUpsertCache = make(map[string]insertCache)
)
var (
// Force time package dependency for automated UpdatedAt/CreatedAt.
_ = time.Second
// Force qmhelper dependency for where clause generation (which doesn't
// always happen)
_ = qmhelper.Where
)
var gorpMigrationBeforeInsertHooks []GorpMigrationHook
var gorpMigrationBeforeUpdateHooks []GorpMigrationHook
var gorpMigrationBeforeDeleteHooks []GorpMigrationHook
var gorpMigrationBeforeUpsertHooks []GorpMigrationHook
var gorpMigrationAfterInsertHooks []GorpMigrationHook
var gorpMigrationAfterSelectHooks []GorpMigrationHook
var gorpMigrationAfterUpdateHooks []GorpMigrationHook
var gorpMigrationAfterDeleteHooks []GorpMigrationHook
var gorpMigrationAfterUpsertHooks []GorpMigrationHook
// doBeforeInsertHooks executes all "before insert" hooks.
func (o *GorpMigration) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *GorpMigration) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *GorpMigration) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *GorpMigration) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterInsertHooks executes all "after Insert" hooks.
func (o *GorpMigration) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterSelectHooks executes all "after Select" hooks.
func (o *GorpMigration) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterSelectHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpdateHooks executes all "after Update" hooks.
func (o *GorpMigration) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *GorpMigration) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *GorpMigration) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// AddGorpMigrationHook registers your hook function for all future operations.
func AddGorpMigrationHook(hookPoint boil.HookPoint, gorpMigrationHook GorpMigrationHook) {
switch hookPoint {
case boil.BeforeInsertHook:
gorpMigrationBeforeInsertHooks = append(gorpMigrationBeforeInsertHooks, gorpMigrationHook)
case boil.BeforeUpdateHook:
gorpMigrationBeforeUpdateHooks = append(gorpMigrationBeforeUpdateHooks, gorpMigrationHook)
case boil.BeforeDeleteHook:
gorpMigrationBeforeDeleteHooks = append(gorpMigrationBeforeDeleteHooks, gorpMigrationHook)
case boil.BeforeUpsertHook:
gorpMigrationBeforeUpsertHooks = append(gorpMigrationBeforeUpsertHooks, gorpMigrationHook)
case boil.AfterInsertHook:
gorpMigrationAfterInsertHooks = append(gorpMigrationAfterInsertHooks, gorpMigrationHook)
case boil.AfterSelectHook:
gorpMigrationAfterSelectHooks = append(gorpMigrationAfterSelectHooks, gorpMigrationHook)
case boil.AfterUpdateHook:
gorpMigrationAfterUpdateHooks = append(gorpMigrationAfterUpdateHooks, gorpMigrationHook)
case boil.AfterDeleteHook:
gorpMigrationAfterDeleteHooks = append(gorpMigrationAfterDeleteHooks, gorpMigrationHook)
case boil.AfterUpsertHook:
gorpMigrationAfterUpsertHooks = append(gorpMigrationAfterUpsertHooks, gorpMigrationHook)
}
}
// One returns a single gorpMigration record from the query.
//
// The query is limited to one row. sql.ErrNoRows is passed through
// unwrapped so callers can test for the not-found case; any other bind
// error is wrapped with context. After-select hooks run on the result.
func (q gorpMigrationQuery) One(ctx context.Context, exec boil.ContextExecutor) (*GorpMigration, error) {
	o := &GorpMigration{}

	queries.SetLimit(q.Query, 1)

	err := q.Bind(ctx, exec, o)
	if err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: failed to execute a one query for gorp_migrations")
	}

	if err := o.doAfterSelectHooks(ctx, exec); err != nil {
		return o, err
	}

	return o, nil
}
// All returns all GorpMigration records from the query.
//
// After-select hooks run once per returned row, but only when at least
// one hook is registered (the len check avoids the loop entirely).
func (q gorpMigrationQuery) All(ctx context.Context, exec boil.ContextExecutor) (GorpMigrationSlice, error) {
	var o []*GorpMigration

	err := q.Bind(ctx, exec, &o)
	if err != nil {
		return nil, errors.Wrap(err, "models: failed to assign all query results to GorpMigration slice")
	}

	if len(gorpMigrationAfterSelectHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
				return o, err
			}
		}
	}

	return o, nil
}
// Count returns the count of all GorpMigration records in the query.
//
// The select list is cleared and replaced with COUNT(*) before the
// query executes, so any column mods on q are discarded.
func (q gorpMigrationQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	var count int64

	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to count gorp_migrations rows")
	}

	return count, nil
}
// Exists checks if the row exists in the table.
//
// Implemented as a COUNT(*) limited to one row, so it reports true as
// soon as any matching row is found.
func (q gorpMigrationQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	var count int64

	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	queries.SetLimit(q.Query, 1)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return false, errors.Wrap(err, "models: failed to check if gorp_migrations exists")
	}

	return count > 0, nil
}
// GorpMigrations retrieves all the records using an executor.
//
// Any supplied query mods are applied on top of the implicit
// FROM "gorp_migrations" clause.
func GorpMigrations(mods ...qm.QueryMod) gorpMigrationQuery {
	queryMods := append(mods, qm.From("\"gorp_migrations\""))

	return gorpMigrationQuery{NewQuery(queryMods...)}
}
// FindGorpMigration retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
//
// sql.ErrNoRows is passed through unwrapped so callers can detect the
// not-found case; any other error is wrapped with context. After-select
// hooks run on the fetched object.
func FindGorpMigration(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*GorpMigration, error) {
	gorpMigrationObj := &GorpMigration{}

	sel := "*"
	if len(selectCols) > 0 {
		// Quote each requested column with the dialect's identifier quotes.
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"gorp_migrations\" where \"id\"=?", sel,
	)

	q := queries.Raw(query, iD)

	err := q.Bind(ctx, exec, gorpMigrationObj)
	if err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: unable to select from gorp_migrations")
	}

	if err = gorpMigrationObj.doAfterSelectHooks(ctx, exec); err != nil {
		return gorpMigrationObj, err
	}

	return gorpMigrationObj, nil
}
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
//
// Runs before-insert hooks, builds (or reuses a cached) INSERT statement
// for the inferred column set, executes it, scans any RETURNING columns
// back into o, then runs after-insert hooks.
func (o *GorpMigration) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("models: no gorp_migrations provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Columns with a database default that hold a non-zero value on o
	// must be inserted explicitly instead of relying on the default.
	nzDefaults := queries.NonZeroDefaultSet(gorpMigrationColumnsWithDefault, o)

	// The statement text and value mappings are cached per
	// (column set, non-zero defaults) combination.
	key := makeCacheKey(columns, nzDefaults)
	gorpMigrationInsertCacheMut.RLock()
	cache, cached := gorpMigrationInsertCache[key]
	gorpMigrationInsertCacheMut.RUnlock()

	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			gorpMigrationAllColumns,
			gorpMigrationColumnsWithDefault,
			gorpMigrationColumnsWithoutDefault,
			nzDefaults,
		)

		cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, returnColumns)
		if err != nil {
			return err
		}
		// %%s placeholders are filled in below with the (empty) OUTPUT
		// clause and the optional RETURNING clause.
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"gorp_migrations\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"gorp_migrations\" %sDEFAULT VALUES%s"
		}

		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}

	// With a RETURNING clause the generated defaults are scanned back
	// into o; otherwise a plain exec suffices.
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}

	if err != nil {
		return errors.Wrap(err, "models: unable to insert into gorp_migrations")
	}

	// Populate the cache only after a successful execution.
	if !cached {
		gorpMigrationInsertCacheMut.Lock()
		gorpMigrationInsertCache[key] = cache
		gorpMigrationInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the GorpMigration.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
//
// Returns the number of rows affected. Before- and after-update hooks
// run around the statement; the statement and its value mapping are
// cached per column set.
func (o *GorpMigration) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}

	key := makeCacheKey(columns, nil)
	gorpMigrationUpdateCacheMut.RLock()
	cache, cached := gorpMigrationUpdateCache[key]
	gorpMigrationUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			gorpMigrationAllColumns,
			gorpMigrationPrimaryKeyColumns,
		)

		// created_at is excluded from inferred updates; an explicit
		// whitelist can still include it.
		if !columns.IsWhitelist() {
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("models: unable to update gorp_migrations, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, gorpMigrationPrimaryKeyColumns),
		)
		// Values are bound as SET columns first, then the primary key
		// columns for the WHERE clause.
		cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, append(wl, gorpMigrationPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}

	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update gorp_migrations row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by update for gorp_migrations")
	}

	if !cached {
		gorpMigrationUpdateCacheMut.Lock()
		gorpMigrationUpdateCache[key] = cache
		gorpMigrationUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
//
// cols maps column names to new values; the query's own filters decide
// which rows are touched. Returns the number of rows affected. No
// update hooks run for bulk query updates.
func (q gorpMigrationQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all for gorp_migrations")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected for gorp_migrations")
	}

	return rowsAff, nil
}
// UpdateAll updates all rows with the specified column values, using an executor.
//
// Applies the same column assignments to every object in the slice,
// matching rows by primary key. Returns the number of rows affected.
// Errors if cols is empty; a no-op for an empty slice.
func (o GorpMigrationSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("models: update all requires at least one column argument")
	}

	// Bind the SET values first...
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// ...then append all of the primary key values for each object so
	// the repeated WHERE clause can match every row.
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all in gorpMigration slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all gorpMigration")
	}
	return rowsAff, nil
}
// Delete deletes a single GorpMigration record with an executor.
// Delete will match against the primary key column to find the record to delete.
//
// Returns the number of rows affected. Before- and after-delete hooks
// run around the statement.
func (o *GorpMigration) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("models: no GorpMigration provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), gorpMigrationPrimaryKeyMapping)
	sql := "DELETE FROM \"gorp_migrations\" WHERE \"id\"=?"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete from gorp_migrations")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by delete for gorp_migrations")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
// DeleteAll deletes all matching rows.
//
// Converts the query into a DELETE and executes it; returns the number
// of rows affected. No delete hooks run for bulk query deletes.
func (q gorpMigrationQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("models: no gorpMigrationQuery provided for delete all")
	}

	queries.SetDelete(q.Query)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from gorp_migrations")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations")
	}

	return rowsAff, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
//
// Matches rows by the primary key of each object. Before- and
// after-delete hooks run per object, but only when hooks are actually
// registered. Returns the number of rows affected.
func (o GorpMigrationSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(gorpMigrationBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Collect each object's primary key values for the repeated WHERE
	// clause below.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"gorp_migrations\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from gorpMigration slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations")
	}

	if len(gorpMigrationAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
//
// On success the receiver is overwritten in place with the freshly
// loaded row; on failure it is left untouched.
func (o *GorpMigration) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	fresh, err := FindGorpMigration(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	*o = *fresh
	return nil
}
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
//
// A nil or empty slice is a no-op. Note the reloaded slice replaces *o
// wholesale, so row order follows the database result, not the input.
func (o *GorpMigrationSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := GorpMigrationSlice{}
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"gorp_migrations\".* FROM \"gorp_migrations\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "models: unable to reload all in GorpMigrationSlice")
	}

	*o = slice

	return nil
}
// GorpMigrationExists checks if the GorpMigration row exists.
//
// Uses a SELECT EXISTS(...) query so the database answers with a single
// boolean instead of returning row data.
func GorpMigrationExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
	var exists bool
	sql := "select exists(select 1 from \"gorp_migrations\" where \"id\"=? limit 1)"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, iD)
	}
	row := exec.QueryRowContext(ctx, sql, iD)

	err := row.Scan(&exists)
	if err != nil {
		return false, errors.Wrap(err, "models: unable to check if gorp_migrations exists")
	}

	return exists, nil
}

View File

@@ -1,684 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/volatiletech/randomize"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/strmangle"
)
var (
	// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
	// so force a package dependency in case they don't.
	// (Blank assignment keeps the queries import referenced.)
	_ = queries.Equal
)
// testGorpMigrations verifies that the GorpMigrations constructor
// returns a query with a non-nil underlying *queries.Query.
func testGorpMigrations(t *testing.T) {
	t.Parallel()

	if q := GorpMigrations(); q.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
// testGorpMigrationsDelete inserts one randomized row inside a
// rolled-back transaction, deletes it via GorpMigration.Delete, and
// checks that exactly one row was affected and the table is empty.
func testGorpMigrationsDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := o.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testGorpMigrationsQueryDeleteAll inserts one randomized row inside a
// rolled-back transaction, deletes via the query-level DeleteAll, and
// checks the affected count and the resulting empty table.
func testGorpMigrationsQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := GorpMigrations().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testGorpMigrationsSliceDeleteAll inserts one randomized row inside a
// rolled-back transaction, deletes via the slice-level DeleteAll, and
// checks the affected count and the resulting empty table.
func testGorpMigrationsSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := GorpMigrationSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testGorpMigrationsExists inserts one randomized row inside a
// rolled-back transaction and checks GorpMigrationExists reports true
// for its primary key.
func testGorpMigrationsExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := GorpMigrationExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if GorpMigration exists: %s", err)
	}
	if !e {
		t.Errorf("Expected GorpMigrationExists to return true, but got false.")
	}
}
// testGorpMigrationsFind inserts one randomized row inside a
// rolled-back transaction and checks FindGorpMigration retrieves a
// non-nil record by primary key.
func testGorpMigrationsFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	gorpMigrationFound, err := FindGorpMigration(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if gorpMigrationFound == nil {
		t.Error("want a record, got nil")
	}
}
// testGorpMigrationsBind inserts one randomized row inside a
// rolled-back transaction and checks that Bind can populate a struct
// from the query result without error.
func testGorpMigrationsBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = GorpMigrations().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
// testGorpMigrationsOne inserts one randomized row inside a rolled-back
// transaction and checks that One returns a non-nil record.
func testGorpMigrationsOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := GorpMigrations().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
// testGorpMigrationsAll inserts two randomized rows inside a
// rolled-back transaction and checks that All returns both of them.
func testGorpMigrationsAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	gorpMigrationOne := &GorpMigration{}
	gorpMigrationTwo := &GorpMigration{}
	if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}
	if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := GorpMigrations().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
// testGorpMigrationsCount inserts two randomized rows inside a
// rolled-back transaction and checks that Count reports two.
func testGorpMigrationsCount(t *testing.T) {
	t.Parallel()

	var err error
	seed := randomize.NewSeed()
	gorpMigrationOne := &GorpMigration{}
	gorpMigrationTwo := &GorpMigration{}
	if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}
	if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// The hook helpers below are registered by testGorpMigrationsHooks. Each
// one zeroes the passed object so the test can verify — via
// reflect.DeepEqual against an empty struct — that its hook point was
// actually invoked.
func gorpMigrationBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}

func gorpMigrationAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
	*o = GorpMigration{}
	return nil
}
// testGorpMigrationsHooks exercises every hook point in turn: for each
// one it registers the corresponding zeroing hook, invokes the matching
// do*Hooks runner with a nil executor, asserts the object was emptied
// (proving the hook ran), then clears the hook slice before testing the
// next point.
func testGorpMigrationsHooks(t *testing.T) {
	t.Parallel()

	var err error

	ctx := context.Background()
	empty := &GorpMigration{}
	o := &GorpMigration{}

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, false); err != nil {
		t.Errorf("Unable to randomize GorpMigration object: %s", err)
	}

	AddGorpMigrationHook(boil.BeforeInsertHook, gorpMigrationBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	gorpMigrationBeforeInsertHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.AfterInsertHook, gorpMigrationAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	gorpMigrationAfterInsertHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.AfterSelectHook, gorpMigrationAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	gorpMigrationAfterSelectHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.BeforeUpdateHook, gorpMigrationBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	gorpMigrationBeforeUpdateHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.AfterUpdateHook, gorpMigrationAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	gorpMigrationAfterUpdateHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.BeforeDeleteHook, gorpMigrationBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	gorpMigrationBeforeDeleteHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.AfterDeleteHook, gorpMigrationAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	gorpMigrationAfterDeleteHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.BeforeUpsertHook, gorpMigrationBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	gorpMigrationBeforeUpsertHooks = []GorpMigrationHook{}

	AddGorpMigrationHook(boil.AfterUpsertHook, gorpMigrationAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	gorpMigrationAfterUpsertHooks = []GorpMigrationHook{}
}
// testGorpMigrationsInsert inserts one randomized row with inferred
// columns inside a rolled-back transaction and checks Count reports
// one.
func testGorpMigrationsInsert(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testGorpMigrationsInsertWhitelist inserts one randomized row using an
// explicit whitelist of the non-default columns (instead of inference)
// and checks Count reports one.
func testGorpMigrationsInsertWhitelist(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Whitelist(gorpMigrationColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testGorpMigrationsReload inserts one randomized row inside a
// rolled-back transaction and checks Reload refetches it without error.
func testGorpMigrationsReload(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = o.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testGorpMigrationsReloadAll inserts one randomized row inside a
// rolled-back transaction and checks the slice-level ReloadAll
// refetches it without error.
func testGorpMigrationsReloadAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := GorpMigrationSlice{o}

	if err = slice.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testGorpMigrationsSelect inserts one randomized row inside a
// rolled-back transaction and checks that All returns exactly one
// record.
func testGorpMigrationsSelect(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := GorpMigrations().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 1 {
		t.Error("want one record, got:", len(slice))
	}
}
var (
	// gorpMigrationDBTypes maps struct field names to their database
	// column types; consumed by randomize.Struct in the tests above.
	gorpMigrationDBTypes = map[string]string{`ID`: `VARCHAR(255)`, `AppliedAt`: `DATETIME`}
	// Keep the bytes import referenced.
	_ = bytes.MinRead
)
// testGorpMigrationsUpdate inserts one randomized row, re-randomizes the
// non-primary-key fields, and checks Update affects exactly one row.
// Skipped when the table has no primary key or only primary-key columns.
func testGorpMigrationsUpdate(t *testing.T) {
	t.Parallel()

	if 0 == len(gorpMigrationPrimaryKeyColumns) {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Re-randomize everything except the primary key so the UPDATE
	// still targets the inserted row.
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testGorpMigrationsSliceUpdateAll inserts one randomized row,
// re-randomizes its non-primary-key fields, builds an update map from
// the struct's `boil` tags, and checks slice-level UpdateAll affects
// exactly one row. Skipped when the table has only primary-key columns.
func testGorpMigrationsSliceUpdateAll(t *testing.T) {
	t.Parallel()

	if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &GorpMigration{}
	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := GorpMigrations().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize GorpMigration struct: %s", err)
	}

	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(gorpMigrationAllColumns, gorpMigrationPrimaryKeyColumns) {
		fields = gorpMigrationAllColumns
	} else {
		fields = strmangle.SetComplement(
			gorpMigrationAllColumns,
			gorpMigrationPrimaryKeyColumns,
		)
	}

	// Translate the selected column names into an update map by
	// matching each column against the struct fields' `boil` tags.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()

	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}

	slice := GorpMigrationSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}

View File

@@ -1,941 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"context"
"database/sql"
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/friendsofgo/errors"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
"github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
"github.com/volatiletech/strmangle"
)
// Header is an object representing the database table.
// NOTE(review): the columns appear to mirror tar header fields
// (typeflag, linkname, paxrecords, ...) — confirm against the schema.
type Header struct {
	Record int64 `boil:"record" json:"record" toml:"record" yaml:"record"`
	Lastknownrecord int64 `boil:"lastknownrecord" json:"lastknownrecord" toml:"lastknownrecord" yaml:"lastknownrecord"`
	Block int64 `boil:"block" json:"block" toml:"block" yaml:"block"`
	Lastknownblock int64 `boil:"lastknownblock" json:"lastknownblock" toml:"lastknownblock" yaml:"lastknownblock"`
	Deleted int64 `boil:"deleted" json:"deleted" toml:"deleted" yaml:"deleted"`
	Typeflag int64 `boil:"typeflag" json:"typeflag" toml:"typeflag" yaml:"typeflag"`
	Name string `boil:"name" json:"name" toml:"name" yaml:"name"`
	Linkname string `boil:"linkname" json:"linkname" toml:"linkname" yaml:"linkname"`
	Size int64 `boil:"size" json:"size" toml:"size" yaml:"size"`
	Mode int64 `boil:"mode" json:"mode" toml:"mode" yaml:"mode"`
	UID int64 `boil:"uid" json:"uid" toml:"uid" yaml:"uid"`
	Gid int64 `boil:"gid" json:"gid" toml:"gid" yaml:"gid"`
	Uname string `boil:"uname" json:"uname" toml:"uname" yaml:"uname"`
	Gname string `boil:"gname" json:"gname" toml:"gname" yaml:"gname"`
	Modtime time.Time `boil:"modtime" json:"modtime" toml:"modtime" yaml:"modtime"`
	Accesstime time.Time `boil:"accesstime" json:"accesstime" toml:"accesstime" yaml:"accesstime"`
	Changetime time.Time `boil:"changetime" json:"changetime" toml:"changetime" yaml:"changetime"`
	Devmajor int64 `boil:"devmajor" json:"devmajor" toml:"devmajor" yaml:"devmajor"`
	Devminor int64 `boil:"devminor" json:"devminor" toml:"devminor" yaml:"devminor"`
	Paxrecords string `boil:"paxrecords" json:"paxrecords" toml:"paxrecords" yaml:"paxrecords"`
	Format int64 `boil:"format" json:"format" toml:"format" yaml:"format"`
	// R holds loaded relationships (this table defines none; see HeaderRels).
	R *headerR `boil:"-" json:"-" toml:"-" yaml:"-"`
	// L holds the relationship Load methods.
	L headerL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
// HeaderColumns maps Header struct field names to their bare database
// column names, for use in column lists.
var HeaderColumns = struct {
	Record string
	Lastknownrecord string
	Block string
	Lastknownblock string
	Deleted string
	Typeflag string
	Name string
	Linkname string
	Size string
	Mode string
	UID string
	Gid string
	Uname string
	Gname string
	Modtime string
	Accesstime string
	Changetime string
	Devmajor string
	Devminor string
	Paxrecords string
	Format string
}{
	Record: "record",
	Lastknownrecord: "lastknownrecord",
	Block: "block",
	Lastknownblock: "lastknownblock",
	Deleted: "deleted",
	Typeflag: "typeflag",
	Name: "name",
	Linkname: "linkname",
	Size: "size",
	Mode: "mode",
	UID: "uid",
	Gid: "gid",
	Uname: "uname",
	Gname: "gname",
	Modtime: "modtime",
	Accesstime: "accesstime",
	Changetime: "changetime",
	Devmajor: "devmajor",
	Devminor: "devminor",
	Paxrecords: "paxrecords",
	Format: "format",
}
// HeaderTableColumns maps Header struct field names to table-qualified
// ("headers.<col>") column names, for disambiguation in joins.
var HeaderTableColumns = struct {
	Record string
	Lastknownrecord string
	Block string
	Lastknownblock string
	Deleted string
	Typeflag string
	Name string
	Linkname string
	Size string
	Mode string
	UID string
	Gid string
	Uname string
	Gname string
	Modtime string
	Accesstime string
	Changetime string
	Devmajor string
	Devminor string
	Paxrecords string
	Format string
}{
	Record: "headers.record",
	Lastknownrecord: "headers.lastknownrecord",
	Block: "headers.block",
	Lastknownblock: "headers.lastknownblock",
	Deleted: "headers.deleted",
	Typeflag: "headers.typeflag",
	Name: "headers.name",
	Linkname: "headers.linkname",
	Size: "headers.size",
	Mode: "headers.mode",
	UID: "headers.uid",
	Gid: "headers.gid",
	Uname: "headers.uname",
	Gname: "headers.gname",
	Modtime: "headers.modtime",
	Accesstime: "headers.accesstime",
	Changetime: "headers.changetime",
	Devmajor: "headers.devmajor",
	Devminor: "headers.devminor",
	Paxrecords: "headers.paxrecords",
	Format: "headers.format",
}
// Generated where
// whereHelperint64 builds typed WHERE-clause query mods for an int64 column.
type whereHelperint64 struct{ field string }
func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
// IN builds a "field IN (...)" mod; the slice is widened to []interface{}
// because qm.WhereIn is variadic over interface{}.
func (w whereHelperint64) IN(slice []int64) qm.QueryMod {
	values := make([]interface{}, 0, len(slice))
	for _, value := range slice {
		values = append(values, value)
	}
	return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
}
// NIN builds a "field NOT IN (...)" mod.
func (w whereHelperint64) NIN(slice []int64) qm.QueryMod {
	values := make([]interface{}, 0, len(slice))
	for _, value := range slice {
		values = append(values, value)
	}
	return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
}
// whereHelpertime_Time builds typed WHERE-clause query mods for a
// time.Time column.
type whereHelpertime_Time struct{ field string }
func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.EQ, x)
}
func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.NEQ, x)
}
func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GTE, x)
}
// HeaderWhere exposes a typed where-clause helper per column, each bound
// to its fully-quoted "headers"."<col>" identifier.
var HeaderWhere = struct {
	Record whereHelperint64
	Lastknownrecord whereHelperint64
	Block whereHelperint64
	Lastknownblock whereHelperint64
	Deleted whereHelperint64
	Typeflag whereHelperint64
	Name whereHelperstring
	Linkname whereHelperstring
	Size whereHelperint64
	Mode whereHelperint64
	UID whereHelperint64
	Gid whereHelperint64
	Uname whereHelperstring
	Gname whereHelperstring
	Modtime whereHelpertime_Time
	Accesstime whereHelpertime_Time
	Changetime whereHelpertime_Time
	Devmajor whereHelperint64
	Devminor whereHelperint64
	Paxrecords whereHelperstring
	Format whereHelperint64
}{
	Record: whereHelperint64{field: "\"headers\".\"record\""},
	Lastknownrecord: whereHelperint64{field: "\"headers\".\"lastknownrecord\""},
	Block: whereHelperint64{field: "\"headers\".\"block\""},
	Lastknownblock: whereHelperint64{field: "\"headers\".\"lastknownblock\""},
	Deleted: whereHelperint64{field: "\"headers\".\"deleted\""},
	Typeflag: whereHelperint64{field: "\"headers\".\"typeflag\""},
	Name: whereHelperstring{field: "\"headers\".\"name\""},
	Linkname: whereHelperstring{field: "\"headers\".\"linkname\""},
	Size: whereHelperint64{field: "\"headers\".\"size\""},
	Mode: whereHelperint64{field: "\"headers\".\"mode\""},
	UID: whereHelperint64{field: "\"headers\".\"uid\""},
	Gid: whereHelperint64{field: "\"headers\".\"gid\""},
	Uname: whereHelperstring{field: "\"headers\".\"uname\""},
	Gname: whereHelperstring{field: "\"headers\".\"gname\""},
	Modtime: whereHelpertime_Time{field: "\"headers\".\"modtime\""},
	Accesstime: whereHelpertime_Time{field: "\"headers\".\"accesstime\""},
	Changetime: whereHelpertime_Time{field: "\"headers\".\"changetime\""},
	Devmajor: whereHelperint64{field: "\"headers\".\"devmajor\""},
	Devminor: whereHelperint64{field: "\"headers\".\"devminor\""},
	Paxrecords: whereHelperstring{field: "\"headers\".\"paxrecords\""},
	Format: whereHelperint64{field: "\"headers\".\"format\""},
}
// HeaderRels is where relationship names are stored.
// Empty: the headers table has no foreign-key relationships.
var HeaderRels = struct {
}{}
// headerR is where relationships are stored.
type headerR struct {
}
// NewStruct creates a new relationship struct
func (*headerR) NewStruct() *headerR {
	return &headerR{}
}
// headerL is where Load methods for each relationship are stored.
type headerL struct{}
// Column sets used by the generated CRUD helpers. Note the primary key is
// the "name" column, and no columns carry database defaults.
var (
	headerAllColumns = []string{"record", "lastknownrecord", "block", "lastknownblock", "deleted", "typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "paxrecords", "format"}
	headerColumnsWithoutDefault = []string{"record", "lastknownrecord", "block", "lastknownblock", "deleted", "typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "paxrecords", "format"}
	headerColumnsWithDefault = []string{}
	headerPrimaryKeyColumns = []string{"name"}
)
type (
	// HeaderSlice is an alias for a slice of pointers to Header.
	// This should almost always be used instead of []Header.
	HeaderSlice []*Header
	// HeaderHook is the signature for custom Header hook methods
	HeaderHook func(context.Context, boil.ContextExecutor, *Header) error
	// headerQuery wraps a queries.Query with Header-typed result methods.
	headerQuery struct {
		*queries.Query
	}
)
// Cache for insert, update and upsert
// Prepared statement/mapping caches keyed by column set; guarded by the
// corresponding RWMutex since queries may be built concurrently.
var (
	headerType = reflect.TypeOf(&Header{})
	headerMapping = queries.MakeStructMapping(headerType)
	headerPrimaryKeyMapping, _ = queries.BindMapping(headerType, headerMapping, headerPrimaryKeyColumns)
	headerInsertCacheMut sync.RWMutex
	headerInsertCache = make(map[string]insertCache)
	headerUpdateCacheMut sync.RWMutex
	headerUpdateCache = make(map[string]updateCache)
	headerUpsertCacheMut sync.RWMutex
	headerUpsertCache = make(map[string]insertCache)
)
var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)
// Package-level hook registries, populated via AddHeaderHook.
// NOTE(review): registration is not synchronized — register hooks at
// startup, before concurrent use.
var headerBeforeInsertHooks []HeaderHook
var headerBeforeUpdateHooks []HeaderHook
var headerBeforeDeleteHooks []HeaderHook
var headerBeforeUpsertHooks []HeaderHook
var headerAfterInsertHooks []HeaderHook
var headerAfterSelectHooks []HeaderHook
var headerAfterUpdateHooks []HeaderHook
var headerAfterDeleteHooks []HeaderHook
var headerAfterUpsertHooks []HeaderHook
// doBeforeInsertHooks executes all "before insert" hooks.
// Each do*Hooks method is a no-op when hooks are skipped via the context,
// and stops at the first hook that returns an error.
func (o *Header) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerBeforeInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *Header) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerBeforeUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *Header) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerBeforeDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *Header) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerBeforeUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterInsertHooks executes all "after Insert" hooks.
func (o *Header) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerAfterInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterSelectHooks executes all "after Select" hooks.
func (o *Header) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerAfterSelectHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterUpdateHooks executes all "after Update" hooks.
func (o *Header) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerAfterUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *Header) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerAfterDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *Header) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range headerAfterUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// AddHeaderHook registers your hook function for all future operations.
func AddHeaderHook(hookPoint boil.HookPoint, headerHook HeaderHook) {
	switch hookPoint {
	case boil.BeforeInsertHook:
		headerBeforeInsertHooks = append(headerBeforeInsertHooks, headerHook)
	case boil.BeforeUpdateHook:
		headerBeforeUpdateHooks = append(headerBeforeUpdateHooks, headerHook)
	case boil.BeforeDeleteHook:
		headerBeforeDeleteHooks = append(headerBeforeDeleteHooks, headerHook)
	case boil.BeforeUpsertHook:
		headerBeforeUpsertHooks = append(headerBeforeUpsertHooks, headerHook)
	case boil.AfterInsertHook:
		headerAfterInsertHooks = append(headerAfterInsertHooks, headerHook)
	case boil.AfterSelectHook:
		headerAfterSelectHooks = append(headerAfterSelectHooks, headerHook)
	case boil.AfterUpdateHook:
		headerAfterUpdateHooks = append(headerAfterUpdateHooks, headerHook)
	case boil.AfterDeleteHook:
		headerAfterDeleteHooks = append(headerAfterDeleteHooks, headerHook)
	case boil.AfterUpsertHook:
		headerAfterUpsertHooks = append(headerAfterUpsertHooks, headerHook)
	}
}
// One returns a single header record from the query.
// It maps sql.ErrNoRows through unchanged so callers can test for it.
func (q headerQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Header, error) {
	queries.SetLimit(q.Query, 1)

	obj := &Header{}
	if err := q.Bind(ctx, exec, obj); err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: failed to execute a one query for headers")
	}

	if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
		return obj, err
	}
	return obj, nil
}
// All returns all Header records from the query.
// After-select hooks run for each bound record.
func (q headerQuery) All(ctx context.Context, exec boil.ContextExecutor) (HeaderSlice, error) {
	var records []*Header
	if err := q.Bind(ctx, exec, &records); err != nil {
		return nil, errors.Wrap(err, "models: failed to assign all query results to Header slice")
	}

	if len(headerAfterSelectHooks) == 0 {
		return records, nil
	}
	for _, rec := range records {
		if err := rec.doAfterSelectHooks(ctx, exec); err != nil {
			return records, err
		}
	}
	return records, nil
}
// Count returns the count of all Header records in the query.
func (q headerQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	// Replace any select list with COUNT(*) before executing.
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)

	var total int64
	if err := q.Query.QueryRowContext(ctx, exec).Scan(&total); err != nil {
		return 0, errors.Wrap(err, "models: failed to count headers rows")
	}
	return total, nil
}
// Exists checks if the row exists in the table.
// Implemented as a LIMIT 1 count so it short-circuits on the first match.
func (q headerQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	queries.SetLimit(q.Query, 1)

	var total int64
	if err := q.Query.QueryRowContext(ctx, exec).Scan(&total); err != nil {
		return false, errors.Wrap(err, "models: failed to check if headers exists")
	}
	return total > 0, nil
}
// Headers retrieves all the records using an executor.
func Headers(mods ...qm.QueryMod) headerQuery {
	return headerQuery{NewQuery(append(mods, qm.From("\"headers\""))...)}
}
// FindHeader retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindHeader(ctx context.Context, exec boil.ContextExecutor, name string, selectCols ...string) (*Header, error) {
	sel := "*"
	if len(selectCols) > 0 {
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"headers\" where \"name\"=?", sel,
	)

	obj := &Header{}
	if err := queries.Raw(query, name).Bind(ctx, exec, obj); err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: unable to select from headers")
	}

	if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
		return obj, err
	}
	return obj, nil
}
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Header) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("models: no headers provided for insertion")
	}
	var err error
	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}
	// The built query and bind mappings are cached per (columns, non-zero
	// defaults) key, so repeated inserts with the same shape skip the build.
	nzDefaults := queries.NonZeroDefaultSet(headerColumnsWithDefault, o)
	key := makeCacheKey(columns, nzDefaults)
	headerInsertCacheMut.RLock()
	cache, cached := headerInsertCache[key]
	headerInsertCacheMut.RUnlock()
	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			headerAllColumns,
			headerColumnsWithDefault,
			headerColumnsWithoutDefault,
			nzDefaults,
		)
		cache.valueMapping, err = queries.BindMapping(headerType, headerMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(headerType, headerMapping, returnColumns)
		if err != nil {
			return err
		}
		// %%s escapes survive this Sprintf and are filled in by the second
		// Sprintf below with the output/RETURNING clauses.
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"headers\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"headers\" %sDEFAULT VALUES%s"
		}
		var queryOutput, queryReturning string
		if len(cache.retMapping) != 0 {
			queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
		}
		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}
	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)
	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	// With a RETURNING clause, scan the returned columns back into o;
	// otherwise a plain Exec suffices.
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}
	if err != nil {
		return errors.Wrap(err, "models: unable to insert into headers")
	}
	// Store the freshly built query in the cache only after it executed.
	if !cached {
		headerInsertCacheMut.Lock()
		headerInsertCache[key] = cache
		headerInsertCacheMut.Unlock()
	}
	return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the Header.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Header) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	// Query text and bind mapping are cached per column set.
	key := makeCacheKey(columns, nil)
	headerUpdateCacheMut.RLock()
	cache, cached := headerUpdateCache[key]
	headerUpdateCacheMut.RUnlock()
	if !cached {
		wl := columns.UpdateColumnSet(
			headerAllColumns,
			headerPrimaryKeyColumns,
		)
		// created_at is never updated unless explicitly whitelisted.
		if !columns.IsWhitelist() {
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("models: unable to update headers, could not build whitelist")
		}
		cache.query = fmt.Sprintf("UPDATE \"headers\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, headerPrimaryKeyColumns),
		)
		// Bind the SET values first, then the primary key for the WHERE.
		cache.valueMapping, err = queries.BindMapping(headerType, headerMapping, append(wl, headerPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}
	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update headers row")
	}
	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by update for headers")
	}
	if !cached {
		headerUpdateCacheMut.Lock()
		headerUpdateCache[key] = cache
		headerUpdateCacheMut.Unlock()
	}
	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
func (q headerQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)

	res, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all for headers")
	}

	n, err := res.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected for headers")
	}
	return n, nil
}
// UpdateAll updates all rows with the specified column values, using an executor.
func (o HeaderSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}
	if len(cols) == 0 {
		return 0, errors.New("models: update all requires at least one column argument")
	}
	// Map iteration order is random, but colNames[i] and args[i] are filled
	// in the same pass, so SET names and values stay paired.
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))
	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}
	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), headerPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}
	// WHERE repeats the primary-key clause once per slice element, OR-joined.
	sql := fmt.Sprintf("UPDATE \"headers\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(o)))
	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all in header slice")
	}
	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all header")
	}
	return rowsAff, nil
}
// Delete deletes a single Header record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *Header) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("models: no Header provided for delete")
	}
	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	sql := "DELETE FROM \"headers\" WHERE \"name\"=?"
	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), headerPrimaryKeyMapping)
	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}

	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete from headers")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by delete for headers")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}
	return n, nil
}
// DeleteAll deletes all matching rows.
func (q headerQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("models: no headerQuery provided for delete all")
	}

	queries.SetDelete(q.Query)
	res, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from headers")
	}

	n, err := res.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for headers")
	}
	return n, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
// Before/after delete hooks run per element; the DELETE itself is a single
// statement matching every primary key in the slice.
func (o HeaderSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(headerBeforeDeleteHooks) != 0 {
		for _, rec := range o {
			if err := rec.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	var args []interface{}
	for _, rec := range o {
		args = append(args, queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(rec)), headerPrimaryKeyMapping)...)
	}

	sql := "DELETE FROM \"headers\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(o))
	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}

	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from header slice")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for headers")
	}

	if len(headerAfterDeleteHooks) != 0 {
		for _, rec := range o {
			if err := rec.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}
	return n, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *Header) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	fresh, err := FindHeader(ctx, exec, o.Name)
	if err != nil {
		return err
	}

	*o = *fresh
	return nil
}
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *HeaderSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	var args []interface{}
	for _, rec := range *o {
		args = append(args, queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(rec)), headerPrimaryKeyMapping)...)
	}

	sql := "SELECT \"headers\".* FROM \"headers\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(*o))

	refreshed := HeaderSlice{}
	if err := queries.Raw(sql, args...).Bind(ctx, exec, &refreshed); err != nil {
		return errors.Wrap(err, "models: unable to reload all in HeaderSlice")
	}

	*o = refreshed
	return nil
}
// HeaderExists checks if the Header row exists.
func HeaderExists(ctx context.Context, exec boil.ContextExecutor, name string) (bool, error) {
	sql := "select exists(select 1 from \"headers\" where \"name\"=? limit 1)"
	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, name)
	}

	var exists bool
	if err := exec.QueryRowContext(ctx, sql, name).Scan(&exists); err != nil {
		return false, errors.Wrap(err, "models: unable to check if headers exists")
	}
	return exists, nil
}

View File

@@ -1,684 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/volatiletech/randomize"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/strmangle"
)
var (
	// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
	// so force a package dependency in case they don't.
	_ = queries.Equal
)
// testHeaders verifies that the Headers() constructor produces a non-nil query.
func testHeaders(t *testing.T) {
	t.Parallel()

	q := Headers()
	if q.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
// testHeadersDelete inserts a randomized Header and verifies Delete removes
// exactly that row.
func testHeadersDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	rec := &Header{}
	if err := randomize.Struct(seed, rec, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	rowsAff, err := rec.Delete(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testHeadersQueryDeleteAll inserts one randomized Header and verifies that a
// query-level DeleteAll removes it.
func testHeadersQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	rec := &Header{}
	if err := randomize.Struct(seed, rec, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	rowsAff, err := Headers().DeleteAll(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testHeadersSliceDeleteAll inserts one randomized Header and verifies that
// HeaderSlice.DeleteAll removes it.
func testHeadersSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	rec := &Header{}
	if err := randomize.Struct(seed, rec, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	all := HeaderSlice{rec}
	rowsAff, err := all.DeleteAll(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testHeadersExists inserts a randomized Header and verifies HeaderExists
// reports true for its primary key.
func testHeadersExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	rec := &Header{}
	if err := randomize.Struct(seed, rec, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	found, err := HeaderExists(ctx, tx, rec.Name)
	if err != nil {
		t.Errorf("Unable to check if Header exists: %s", err)
	}
	if !found {
		t.Errorf("Expected HeaderExists to return true, but got false.")
	}
}
// testHeadersFind inserts a randomized Header and verifies FindHeader
// retrieves it by primary key.
func testHeadersFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	rec := &Header{}
	if err := randomize.Struct(seed, rec, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	found, err := FindHeader(ctx, tx, rec.Name)
	if err != nil {
		t.Error(err)
	}
	if found == nil {
		t.Error("want a record, got nil")
	}
}
// testHeadersBind inserts a randomized Header and verifies the query result
// can be bound back onto a struct.
func testHeadersBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	rec := &Header{}
	if err := randomize.Struct(seed, rec, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err := Headers().Bind(ctx, tx, rec); err != nil {
		t.Error(err)
	}
}
// testHeadersOne inserts a randomized Header and verifies One returns a
// non-nil record.
func testHeadersOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	rec := &Header{}
	if err := randomize.Struct(seed, rec, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	got, err := Headers().One(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if got == nil {
		t.Error("expected to get a non nil record")
	}
}
// testHeadersAll inserts two randomized Headers and verifies that All
// returns both rows.
func testHeadersAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	objs := []*Header{{}, {}}
	for _, o := range objs {
		if err := randomize.Struct(seed, o, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Header struct: %s", err)
		}
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	for _, o := range objs {
		if err := o.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}

	slice, err := Headers().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
// testHeadersCount inserts two randomized Headers and verifies that Count
// reports exactly two rows.
func testHeadersCount(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	objs := []*Header{{}, {}}
	for _, o := range objs {
		if err := randomize.Struct(seed, o, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Header struct: %s", err)
		}
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	for _, o := range objs {
		if err := o.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}

	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// headerBeforeInsertHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerAfterInsertHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerAfterSelectHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerBeforeUpdateHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerAfterUpdateHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerBeforeDeleteHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerAfterDeleteHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerBeforeUpsertHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// headerAfterUpsertHook zeroes the Header so the hook tests can detect,
// via reflect.DeepEqual against an empty struct, that the hook ran.
func headerAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
	var zero Header
	*o = zero
	return nil
}
// testHeadersHooks registers each Header lifecycle hook in turn, fires it
// directly through the corresponding do*Hooks method, and verifies the hook
// ran by checking that it reset the object to its zero value (each test hook
// assigns Header{}). Every global hook slice is cleared after its check so
// later tests are unaffected by the registration.
func testHeadersHooks(t *testing.T) {
	t.Parallel()
	var err error
	ctx := context.Background()
	empty := &Header{}
	o := &Header{}
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, headerDBTypes, false); err != nil {
		t.Errorf("Unable to randomize Header object: %s", err)
	}
	// BeforeInsert
	AddHeaderHook(boil.BeforeInsertHook, headerBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	headerBeforeInsertHooks = []HeaderHook{}
	// AfterInsert
	AddHeaderHook(boil.AfterInsertHook, headerAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	headerAfterInsertHooks = []HeaderHook{}
	// AfterSelect
	AddHeaderHook(boil.AfterSelectHook, headerAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	headerAfterSelectHooks = []HeaderHook{}
	// BeforeUpdate
	AddHeaderHook(boil.BeforeUpdateHook, headerBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	headerBeforeUpdateHooks = []HeaderHook{}
	// AfterUpdate
	AddHeaderHook(boil.AfterUpdateHook, headerAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	headerAfterUpdateHooks = []HeaderHook{}
	// BeforeDelete
	AddHeaderHook(boil.BeforeDeleteHook, headerBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	headerBeforeDeleteHooks = []HeaderHook{}
	// AfterDelete
	AddHeaderHook(boil.AfterDeleteHook, headerAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	headerAfterDeleteHooks = []HeaderHook{}
	// BeforeUpsert
	AddHeaderHook(boil.BeforeUpsertHook, headerBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	headerBeforeUpsertHooks = []HeaderHook{}
	// AfterUpsert
	AddHeaderHook(boil.AfterUpsertHook, headerAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	headerAfterUpsertHooks = []HeaderHook{}
}
// testHeadersInsert inserts a randomized Header with inferred columns and
// verifies that exactly one row exists afterwards.
func testHeadersInsert(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	o := &Header{}
	if err := randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testHeadersInsertWhitelist inserts a randomized Header using an explicit
// whitelist of the non-default columns and verifies one row exists.
func testHeadersInsertWhitelist(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	o := &Header{}
	if err := randomize.Struct(seed, o, headerDBTypes, true); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := o.Insert(ctx, tx, boil.Whitelist(headerColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}

	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testHeadersReload inserts a randomized Header and verifies that Reload
// succeeds against the inserted row.
func testHeadersReload(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	o := &Header{}
	if err := randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err := o.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testHeadersReloadAll inserts a randomized Header and verifies that
// HeaderSlice.ReloadAll succeeds for a slice containing it.
func testHeadersReloadAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	o := &Header{}
	if err := randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := HeaderSlice{o}
	if err := slice.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testHeadersSelect inserts a randomized Header and verifies that selecting
// all rows returns exactly one record.
func testHeadersSelect(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	o := &Header{}
	if err := randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	rows, err := Headers().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(rows) != 1 {
		t.Error("want one record, got:", len(rows))
	}
}
var (
	// headerDBTypes maps Header struct field names to their SQLite column
	// types; consumed by randomize.Struct to generate suitable test values.
	headerDBTypes = map[string]string{`Record`: `INTEGER`, `Lastknownrecord`: `INTEGER`, `Block`: `INTEGER`, `Lastknownblock`: `INTEGER`, `Deleted`: `INTEGER`, `Typeflag`: `INTEGER`, `Name`: `TEXT`, `Linkname`: `TEXT`, `Size`: `INTEGER`, `Mode`: `INTEGER`, `UID`: `INTEGER`, `Gid`: `INTEGER`, `Uname`: `TEXT`, `Gname`: `TEXT`, `Modtime`: `DATE`, `Accesstime`: `DATE`, `Changetime`: `DATE`, `Devmajor`: `INTEGER`, `Devminor`: `INTEGER`, `Paxrecords`: `TEXT`, `Format`: `INTEGER`}
	// Reference a bytes identifier so the generated import is always used.
	_ = bytes.MinRead
)
// testHeadersUpdate inserts a randomized Header, re-randomizes its non-key
// columns, and verifies that Update reports exactly one affected row.
func testHeadersUpdate(t *testing.T) {
	t.Parallel()

	if len(headerPrimaryKeyColumns) == 0 {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(headerAllColumns) == len(headerPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	o := &Header{}
	if err := randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Re-randomize everything except the primary key, then update in place.
	if err := randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}

	rowsAff, err := o.Update(ctx, tx, boil.Infer())
	if err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testHeadersSliceUpdateAll inserts a randomized Header, re-randomizes its
// non-key columns, and verifies that HeaderSlice.UpdateAll applies the new
// values to exactly one row.
func testHeadersSliceUpdateAll(t *testing.T) {
	t.Parallel()
	if len(headerAllColumns) == len(headerPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &Header{}
	if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Headers().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Re-randomize all columns except the primary key so the update has
	// something to change.
	if err = randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Header struct: %s", err)
	}
	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(headerAllColumns, headerPrimaryKeyColumns) {
		fields = headerAllColumns
	} else {
		fields = strmangle.SetComplement(
			headerAllColumns,
			headerPrimaryKeyColumns,
		)
	}
	// Build the column -> new value map by matching struct fields to their
	// `boil` tags via reflection.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()
	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}
	slice := HeaderSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}

View File

@@ -1,93 +0,0 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"database/sql"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"path/filepath"
"regexp"
_ "github.com/mattn/go-sqlite3"
"github.com/pkg/errors"
"github.com/spf13/viper"
)
// rgxSQLitekey matches FOREIGN KEY clauses in a `sqlite3 .dump` (one or more
// consecutive lines, case-insensitive) so they can be stripped when the test
// copy of the schema is recreated.
var rgxSQLitekey = regexp.MustCompile(`(?mi)((,\n)?\s+foreign key.*?\n)+`)
// sqliteTester manages a throwaway copy of the configured SQLite database
// for the generated test suite.
type sqliteTester struct {
	dbConn *sql.DB // lazily opened connection to the test copy (see conn)
	dbName string // source database path, from config key sqlite3.dbname
	testDBName string // path of the temporary test database file
}
// init wires the SQLite tester into the generated test harness entry point.
func init() {
	dbMain = &sqliteTester{}
}
// setup copies the configured SQLite database (config key sqlite3.dbname)
// into a temporary file, streaming `sqlite3 .dump` output through
// newFKeyDestroyer so FOREIGN KEY clauses are stripped from the recreated
// schema.
//
// Fix over the generated version: the pipe writer is now always closed and
// createCmd is always reaped, even when dumpCmd fails — previously an early
// return left the create process running and its stdin reader blocked. The
// duplicate fmt.Println of errors that were returned anyway is also removed.
func (s *sqliteTester) setup() error {
	s.dbName = viper.GetString("sqlite3.dbname")
	if len(s.dbName) == 0 {
		return errors.New("no dbname specified")
	}
	s.testDBName = filepath.Join(os.TempDir(), fmt.Sprintf("boil-sqlite3-%d.sql", rand.Int()))

	dumpCmd := exec.Command("sqlite3", "-cmd", ".dump", s.dbName)
	createCmd := exec.Command("sqlite3", s.testDBName)

	// Stream the dump into the new database, filtering FOREIGN KEY
	// definitions on the way through.
	r, w := io.Pipe()
	dumpCmd.Stdout = w
	createCmd.Stdin = newFKeyDestroyer(rgxSQLitekey, r)

	if err := dumpCmd.Start(); err != nil {
		return errors.Wrap(err, "failed to start sqlite3 dump command")
	}
	if err := createCmd.Start(); err != nil {
		// Unblock and reap the already-started dump command before bailing.
		_ = w.CloseWithError(err)
		_ = dumpCmd.Wait()
		return errors.Wrap(err, "failed to start sqlite3 create command")
	}

	// Collect both results before returning: close the write end after the
	// dump finishes so createCmd sees EOF and can always be waited on.
	dumpErr := dumpCmd.Wait()
	_ = w.Close()
	createErr := createCmd.Wait()

	if dumpErr != nil {
		return errors.Wrap(dumpErr, "failed to wait for sqlite3 dump command")
	}
	if createErr != nil {
		return errors.Wrap(createErr, "failed to wait for sqlite3 create command")
	}
	return nil
}
// teardown closes the open test connection, if any, and deletes the
// temporary test database file created by setup.
func (s *sqliteTester) teardown() error {
	if s.dbConn != nil {
		s.dbConn.Close()
	}
	return os.Remove(s.testDBName)
}
// conn lazily opens and caches a connection to the temporary test database.
// Subsequent calls return the cached *sql.DB.
func (s *sqliteTester) conn() (*sql.DB, error) {
	if s.dbConn != nil {
		return s.dbConn, nil
	}

	db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?_loc=UTC", s.testDBName))
	if err != nil {
		return nil, err
	}
	s.dbConn = db
	return s.dbConn, nil
}