build: Add SQLite driver to depend target

Felicitas Pojtinger
2021-12-20 01:50:11 +01:00
parent 07f7af284d
commit 092f8ed240
17 changed files with 4249 additions and 19 deletions


@@ -0,0 +1,119 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"database/sql"
"flag"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/spf13/viper"
"github.com/volatiletech/sqlboiler/v4/boil"
)
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")
const outputDirDepth = 5
var (
dbMain tester
)
type tester interface {
setup() error
conn() (*sql.DB, error)
teardown() error
}
func TestMain(m *testing.M) {
if dbMain == nil {
fmt.Println("no dbMain tester interface was ready")
os.Exit(-1)
}
rand.Seed(time.Now().UnixNano())
flag.Parse()
var err error
// Load configuration
err = initViper()
if err != nil {
fmt.Println("unable to load config file")
os.Exit(-2)
}
// Set DebugMode so we can see generated sql statements
boil.DebugMode = *flagDebugMode
if err = dbMain.setup(); err != nil {
fmt.Println("Unable to execute setup:", err)
os.Exit(-4)
}
conn, err := dbMain.conn()
if err != nil {
fmt.Println("failed to get connection:", err)
}
var code int
boil.SetDB(conn)
code = m.Run()
if err = dbMain.teardown(); err != nil {
fmt.Println("Unable to execute teardown:", err)
os.Exit(-5)
}
os.Exit(code)
}
func initViper() error {
if flagConfigFile != nil && *flagConfigFile != "" {
viper.SetConfigFile(*flagConfigFile)
if err := viper.ReadInConfig(); err != nil {
return err
}
return nil
}
var err error
viper.SetConfigName("sqlboiler")
configHome := os.Getenv("XDG_CONFIG_HOME")
homePath := os.Getenv("HOME")
wd, err := os.Getwd()
if err != nil {
wd = strings.Repeat("../", outputDirDepth)
} else {
wd = wd + strings.Repeat("/..", outputDirDepth)
}
configPaths := []string{wd}
if len(configHome) > 0 {
configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
} else {
configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
}
for _, p := range configPaths {
viper.AddConfigPath(p)
}
// Ignore errors here, fall back to defaults and validation to provide errs
_ = viper.ReadInConfig()
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()
return nil
}
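The TestMain above requires a driver-specific tester to be assigned to dbMain before the suite runs; SQLBoiler generates that implementation in a separate file of this commit. Purely as an illustration (not part of the diff), a minimal hand-written SQLite tester, assuming github.com/mattn/go-sqlite3 as the database/sql driver, could live in another *_test.go file of the same package:

package models

import (
	"database/sql"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"

	// Assumed SQLite driver; any database/sql-compatible SQLite driver works.
	_ "github.com/mattn/go-sqlite3"
)

// sqliteTester is a hypothetical implementation of the tester interface above.
type sqliteTester struct {
	db   *sql.DB
	path string
}

// setup creates a throwaway database file in the system temp directory.
func (t *sqliteTester) setup() error {
	t.path = filepath.Join(os.TempDir(), fmt.Sprintf("sqlboiler_test_%d.sqlite3", rand.Int()))
	var err error
	t.db, err = sql.Open("sqlite3", t.path)
	return err
}

// conn hands the open database to TestMain.
func (t *sqliteTester) conn() (*sql.DB, error) { return t.db, nil }

// teardown closes the connection and removes the temporary file.
func (t *sqliteTester) teardown() error {
	if err := t.db.Close(); err != nil {
		return err
	}
	return os.Remove(t.path)
}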


@@ -0,0 +1,33 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"github.com/volatiletech/sqlboiler/v4/drivers"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
var dialect = drivers.Dialect{
LQ: 0x22,
RQ: 0x22,
UseIndexPlaceholders: false,
UseLastInsertID: false,
UseSchema: false,
UseDefaultKeyword: true,
UseAutoColumns: false,
UseTopClause: false,
UseOutputClause: false,
UseCaseWhenExistsClause: false,
}
// NewQuery initializes a new Query using the passed in QueryMods
func NewQuery(mods ...qm.QueryMod) *queries.Query {
q := &queries.Query{}
queries.SetDialect(q, &dialect)
qm.Apply(q, mods...)
return q
}
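Illustrative usage (not part of the diff): NewQuery only composes query mods against the SQLite dialect above; binding the result is left to the caller. Assuming ctx, a db *sql.DB, and the generated Header model are in scope:

// Build a query for up to ten rows of the headers table and bind it into the
// generated Header struct slice.
q := NewQuery(qm.From("headers"), qm.Limit(10))
var headers []*Header
if err := q.Bind(ctx, db, &headers); err != nil {
	// handle the error
}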


@@ -0,0 +1,52 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"regexp"
"github.com/volatiletech/sqlboiler/v4/boil"
)
var dbNameRand *rand.Rand
func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor {
if err != nil {
panic(fmt.Sprintf("Cannot create a transactor: %s", err))
}
return transactor
}
func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader {
return &fKeyDestroyer{
reader: reader,
rgx: regex,
}
}
type fKeyDestroyer struct {
reader io.Reader
buf *bytes.Buffer
rgx *regexp.Regexp
}
func (f *fKeyDestroyer) Read(b []byte) (int, error) {
if f.buf == nil {
all, err := ioutil.ReadAll(f.reader)
if err != nil {
return 0, err
}
all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1)
all = f.rgx.ReplaceAll(all, []byte{})
f.buf = bytes.NewBuffer(all)
}
return f.buf.Read(b)
}
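Illustrative usage (not part of the diff): the generated test helpers wrap a schema dump in fKeyDestroyer to strip foreign-key clauses before replaying it against a fresh database. The regex below is hypothetical (the real pattern is defined by the SQLite test helpers), and schemaSQL stands for an assumed schema dump string:

// Strip (hypothetical) FOREIGN KEY lines from the dump before it is replayed.
fkRgx := regexp.MustCompile(`(?m)^\s*FOREIGN KEY.*$`)
clean, err := ioutil.ReadAll(newFKeyDestroyer(fkRgx, bytes.NewReader([]byte(schemaSQL))))
if err != nil {
	// handle the error
}
_ = clean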


@@ -0,0 +1,139 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import "testing"
// This test suite runs each operation test in parallel.
// For example, if your database has 3 tables, the suite will run:
// table1, table2 and table3 Delete in parallel
// table1, table2 and table3 Insert in parallel, and so forth.
// It does NOT run each operation group in parallel.
// Separating the tests this way avoids Postgres deadlocks.
func TestParent(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrations)
t.Run("Headers", testHeaders)
}
func TestDelete(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsDelete)
t.Run("Headers", testHeadersDelete)
}
func TestQueryDeleteAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsQueryDeleteAll)
t.Run("Headers", testHeadersQueryDeleteAll)
}
func TestSliceDeleteAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsSliceDeleteAll)
t.Run("Headers", testHeadersSliceDeleteAll)
}
func TestExists(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsExists)
t.Run("Headers", testHeadersExists)
}
func TestFind(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsFind)
t.Run("Headers", testHeadersFind)
}
func TestBind(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsBind)
t.Run("Headers", testHeadersBind)
}
func TestOne(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsOne)
t.Run("Headers", testHeadersOne)
}
func TestAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsAll)
t.Run("Headers", testHeadersAll)
}
func TestCount(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsCount)
t.Run("Headers", testHeadersCount)
}
func TestHooks(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsHooks)
t.Run("Headers", testHeadersHooks)
}
func TestInsert(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsInsert)
t.Run("GorpMigrations", testGorpMigrationsInsertWhitelist)
t.Run("Headers", testHeadersInsert)
t.Run("Headers", testHeadersInsertWhitelist)
}
// TestToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestToOne(t *testing.T) {}
// TestOneToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOne(t *testing.T) {}
// TestToMany tests cannot be run in parallel
// or deadlocks can occur.
func TestToMany(t *testing.T) {}
// TestToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneSet(t *testing.T) {}
// TestToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneRemove(t *testing.T) {}
// TestOneToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneSet(t *testing.T) {}
// TestOneToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneRemove(t *testing.T) {}
// TestToManyAdd tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyAdd(t *testing.T) {}
// TestToManySet tests cannot be run in parallel
// or deadlocks can occur.
func TestToManySet(t *testing.T) {}
// TestToManyRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyRemove(t *testing.T) {}
func TestReload(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsReload)
t.Run("Headers", testHeadersReload)
}
func TestReloadAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsReloadAll)
t.Run("Headers", testHeadersReloadAll)
}
func TestSelect(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsSelect)
t.Run("Headers", testHeadersSelect)
}
func TestUpdate(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsUpdate)
t.Run("Headers", testHeadersUpdate)
}
func TestSliceUpdateAll(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsSliceUpdateAll)
t.Run("Headers", testHeadersSliceUpdateAll)
}


@@ -0,0 +1,12 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
var TableNames = struct {
GorpMigrations string
Headers string
}{
GorpMigrations: "gorp_migrations",
Headers: "headers",
}
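Illustrative usage (not part of the diff): these constants stand in for hard-coded table names when composing queries, e.g. counting rows (assuming ctx and a db *sql.DB are in scope):

// Count the rows of the headers table without spelling out its name.
var n int64
err := NewQuery(qm.Select("count(*)"), qm.From(TableNames.Headers)).
	QueryRowContext(ctx, db).
	Scan(&n)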


@@ -0,0 +1,52 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"strconv"
"github.com/friendsofgo/errors"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/strmangle"
)
// M type is for providing columns and column values to UpdateAll.
type M map[string]interface{}
// ErrSyncFail occurs during insert when the record could not be retrieved in
// order to populate default value information. This usually happens when LastInsertId
// fails or there was a primary key configuration that was not resolvable.
var ErrSyncFail = errors.New("models: failed to synchronize data after insert")
type insertCache struct {
query string
retQuery string
valueMapping []uint64
retMapping []uint64
}
type updateCache struct {
query string
valueMapping []uint64
}
func makeCacheKey(cols boil.Columns, nzDefaults []string) string {
buf := strmangle.GetBuffer()
buf.WriteString(strconv.Itoa(cols.Kind))
for _, w := range cols.Cols {
buf.WriteString(w)
}
if len(nzDefaults) != 0 {
buf.WriteByte('.')
}
for _, nz := range nzDefaults {
buf.WriteString(nz)
}
str := buf.String()
strmangle.PutBuffer(buf)
return str
}


@@ -0,0 +1,928 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"context"
"database/sql"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/friendsofgo/errors"
"github.com/volatiletech/null/v8"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
"github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
"github.com/volatiletech/strmangle"
)
// GorpMigration is an object representing the database table.
type GorpMigration struct {
ID string `boil:"id" json:"id" toml:"id" yaml:"id"`
AppliedAt null.Time `boil:"applied_at" json:"applied_at,omitempty" toml:"applied_at" yaml:"applied_at,omitempty"`
R *gorpMigrationR `boil:"-" json:"-" toml:"-" yaml:"-"`
L gorpMigrationL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
var GorpMigrationColumns = struct {
ID string
AppliedAt string
}{
ID: "id",
AppliedAt: "applied_at",
}
var GorpMigrationTableColumns = struct {
ID string
AppliedAt string
}{
ID: "gorp_migrations.id",
AppliedAt: "gorp_migrations.applied_at",
}
// Generated where
type whereHelperstring struct{ field string }
func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
func (w whereHelperstring) IN(slice []string) qm.QueryMod {
values := make([]interface{}, 0, len(slice))
for _, value := range slice {
values = append(values, value)
}
return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
}
func (w whereHelperstring) NIN(slice []string) qm.QueryMod {
values := make([]interface{}, 0, len(slice))
for _, value := range slice {
values = append(values, value)
}
return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
}
type whereHelpernull_Time struct{ field string }
func (w whereHelpernull_Time) EQ(x null.Time) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_Time) NEQ(x null.Time) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, true, x)
}
func (w whereHelpernull_Time) LT(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_Time) LTE(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_Time) GT(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_Time) GTE(x null.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GTE, x)
}
func (w whereHelpernull_Time) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_Time) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
var GorpMigrationWhere = struct {
ID whereHelperstring
AppliedAt whereHelpernull_Time
}{
ID: whereHelperstring{field: "\"gorp_migrations\".\"id\""},
AppliedAt: whereHelpernull_Time{field: "\"gorp_migrations\".\"applied_at\""},
}
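// exampleWhereHelpers is an illustrative sketch, not part of the generated
// file: the typed where helpers above compose directly into a query, here
// listing migrations that already have an applied_at value.
func exampleWhereHelpers(ctx context.Context, exec boil.ContextExecutor) (GorpMigrationSlice, error) {
	return GorpMigrations(
		GorpMigrationWhere.AppliedAt.IsNotNull(),
		qm.OrderBy(GorpMigrationTableColumns.AppliedAt),
	).All(ctx, exec)
}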
// GorpMigrationRels is where relationship names are stored.
var GorpMigrationRels = struct {
}{}
// gorpMigrationR is where relationships are stored.
type gorpMigrationR struct {
}
// NewStruct creates a new relationship struct
func (*gorpMigrationR) NewStruct() *gorpMigrationR {
return &gorpMigrationR{}
}
// gorpMigrationL is where Load methods for each relationship are stored.
type gorpMigrationL struct{}
var (
gorpMigrationAllColumns = []string{"id", "applied_at"}
gorpMigrationColumnsWithoutDefault = []string{"id", "applied_at"}
gorpMigrationColumnsWithDefault = []string{}
gorpMigrationPrimaryKeyColumns = []string{"id"}
)
type (
// GorpMigrationSlice is an alias for a slice of pointers to GorpMigration.
// This should almost always be used instead of []GorpMigration.
GorpMigrationSlice []*GorpMigration
// GorpMigrationHook is the signature for custom GorpMigration hook methods
GorpMigrationHook func(context.Context, boil.ContextExecutor, *GorpMigration) error
gorpMigrationQuery struct {
*queries.Query
}
)
// Cache for insert, update and upsert
var (
gorpMigrationType = reflect.TypeOf(&GorpMigration{})
gorpMigrationMapping = queries.MakeStructMapping(gorpMigrationType)
gorpMigrationPrimaryKeyMapping, _ = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, gorpMigrationPrimaryKeyColumns)
gorpMigrationInsertCacheMut sync.RWMutex
gorpMigrationInsertCache = make(map[string]insertCache)
gorpMigrationUpdateCacheMut sync.RWMutex
gorpMigrationUpdateCache = make(map[string]updateCache)
gorpMigrationUpsertCacheMut sync.RWMutex
gorpMigrationUpsertCache = make(map[string]insertCache)
)
var (
// Force time package dependency for automated UpdatedAt/CreatedAt.
_ = time.Second
// Force qmhelper dependency for where clause generation (which doesn't
// always happen)
_ = qmhelper.Where
)
var gorpMigrationBeforeInsertHooks []GorpMigrationHook
var gorpMigrationBeforeUpdateHooks []GorpMigrationHook
var gorpMigrationBeforeDeleteHooks []GorpMigrationHook
var gorpMigrationBeforeUpsertHooks []GorpMigrationHook
var gorpMigrationAfterInsertHooks []GorpMigrationHook
var gorpMigrationAfterSelectHooks []GorpMigrationHook
var gorpMigrationAfterUpdateHooks []GorpMigrationHook
var gorpMigrationAfterDeleteHooks []GorpMigrationHook
var gorpMigrationAfterUpsertHooks []GorpMigrationHook
// doBeforeInsertHooks executes all "before insert" hooks.
func (o *GorpMigration) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *GorpMigration) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *GorpMigration) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *GorpMigration) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationBeforeUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterInsertHooks executes all "after Insert" hooks.
func (o *GorpMigration) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterSelectHooks executes all "after Select" hooks.
func (o *GorpMigration) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterSelectHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpdateHooks executes all "after Update" hooks.
func (o *GorpMigration) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *GorpMigration) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *GorpMigration) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range gorpMigrationAfterUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// AddGorpMigrationHook registers your hook function for all future operations.
func AddGorpMigrationHook(hookPoint boil.HookPoint, gorpMigrationHook GorpMigrationHook) {
switch hookPoint {
case boil.BeforeInsertHook:
gorpMigrationBeforeInsertHooks = append(gorpMigrationBeforeInsertHooks, gorpMigrationHook)
case boil.BeforeUpdateHook:
gorpMigrationBeforeUpdateHooks = append(gorpMigrationBeforeUpdateHooks, gorpMigrationHook)
case boil.BeforeDeleteHook:
gorpMigrationBeforeDeleteHooks = append(gorpMigrationBeforeDeleteHooks, gorpMigrationHook)
case boil.BeforeUpsertHook:
gorpMigrationBeforeUpsertHooks = append(gorpMigrationBeforeUpsertHooks, gorpMigrationHook)
case boil.AfterInsertHook:
gorpMigrationAfterInsertHooks = append(gorpMigrationAfterInsertHooks, gorpMigrationHook)
case boil.AfterSelectHook:
gorpMigrationAfterSelectHooks = append(gorpMigrationAfterSelectHooks, gorpMigrationHook)
case boil.AfterUpdateHook:
gorpMigrationAfterUpdateHooks = append(gorpMigrationAfterUpdateHooks, gorpMigrationHook)
case boil.AfterDeleteHook:
gorpMigrationAfterDeleteHooks = append(gorpMigrationAfterDeleteHooks, gorpMigrationHook)
case boil.AfterUpsertHook:
gorpMigrationAfterUpsertHooks = append(gorpMigrationAfterUpsertHooks, gorpMigrationHook)
}
}
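// exampleRegisterHook is an illustrative sketch, not part of the generated
// file: it registers an after-insert hook that records every applied
// migration. A real application would typically log rather than print.
func exampleRegisterHook() {
	AddGorpMigrationHook(boil.AfterInsertHook, func(ctx context.Context, exec boil.ContextExecutor, m *GorpMigration) error {
		fmt.Println("models: applied migration", m.ID)
		return nil
	})
}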
// One returns a single gorpMigration record from the query.
func (q gorpMigrationQuery) One(ctx context.Context, exec boil.ContextExecutor) (*GorpMigration, error) {
o := &GorpMigration{}
queries.SetLimit(q.Query, 1)
err := q.Bind(ctx, exec, o)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows {
return nil, sql.ErrNoRows
}
return nil, errors.Wrap(err, "models: failed to execute a one query for gorp_migrations")
}
if err := o.doAfterSelectHooks(ctx, exec); err != nil {
return o, err
}
return o, nil
}
// All returns all GorpMigration records from the query.
func (q gorpMigrationQuery) All(ctx context.Context, exec boil.ContextExecutor) (GorpMigrationSlice, error) {
var o []*GorpMigration
err := q.Bind(ctx, exec, &o)
if err != nil {
return nil, errors.Wrap(err, "models: failed to assign all query results to GorpMigration slice")
}
if len(gorpMigrationAfterSelectHooks) != 0 {
for _, obj := range o {
if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
return o, err
}
}
}
return o, nil
}
// Count returns the count of all GorpMigration records in the query.
func (q gorpMigrationQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
var count int64
queries.SetSelect(q.Query, nil)
queries.SetCount(q.Query)
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
if err != nil {
return 0, errors.Wrap(err, "models: failed to count gorp_migrations rows")
}
return count, nil
}
// Exists checks if the row exists in the table.
func (q gorpMigrationQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
var count int64
queries.SetSelect(q.Query, nil)
queries.SetCount(q.Query)
queries.SetLimit(q.Query, 1)
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
if err != nil {
return false, errors.Wrap(err, "models: failed to check if gorp_migrations exists")
}
return count > 0, nil
}
// GorpMigrations retrieves all the records using an executor.
func GorpMigrations(mods ...qm.QueryMod) gorpMigrationQuery {
mods = append(mods, qm.From("\"gorp_migrations\""))
return gorpMigrationQuery{NewQuery(mods...)}
}
// FindGorpMigration retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindGorpMigration(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*GorpMigration, error) {
gorpMigrationObj := &GorpMigration{}
sel := "*"
if len(selectCols) > 0 {
sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
}
query := fmt.Sprintf(
"select %s from \"gorp_migrations\" where \"id\"=?", sel,
)
q := queries.Raw(query, iD)
err := q.Bind(ctx, exec, gorpMigrationObj)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows {
return nil, sql.ErrNoRows
}
return nil, errors.Wrap(err, "models: unable to select from gorp_migrations")
}
if err = gorpMigrationObj.doAfterSelectHooks(ctx, exec); err != nil {
return gorpMigrationObj, err
}
return gorpMigrationObj, nil
}
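// exampleFind is an illustrative sketch, not part of the generated file: it
// fetches only the applied_at column for a hypothetical migration id.
func exampleFind(ctx context.Context, exec boil.ContextExecutor) (*GorpMigration, error) {
	return FindGorpMigration(ctx, exec, "0001_initial", GorpMigrationColumns.AppliedAt)
}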
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *GorpMigration) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
if o == nil {
return errors.New("models: no gorp_migrations provided for insertion")
}
var err error
if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
return err
}
nzDefaults := queries.NonZeroDefaultSet(gorpMigrationColumnsWithDefault, o)
key := makeCacheKey(columns, nzDefaults)
gorpMigrationInsertCacheMut.RLock()
cache, cached := gorpMigrationInsertCache[key]
gorpMigrationInsertCacheMut.RUnlock()
if !cached {
wl, returnColumns := columns.InsertColumnSet(
gorpMigrationAllColumns,
gorpMigrationColumnsWithDefault,
gorpMigrationColumnsWithoutDefault,
nzDefaults,
)
cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, wl)
if err != nil {
return err
}
cache.retMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, returnColumns)
if err != nil {
return err
}
if len(wl) != 0 {
cache.query = fmt.Sprintf("INSERT INTO \"gorp_migrations\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
} else {
cache.query = "INSERT INTO \"gorp_migrations\" %sDEFAULT VALUES%s"
}
var queryOutput, queryReturning string
if len(cache.retMapping) != 0 {
queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
}
cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
}
value := reflect.Indirect(reflect.ValueOf(o))
vals := queries.ValuesFromMapping(value, cache.valueMapping)
if boil.IsDebug(ctx) {
writer := boil.DebugWriterFrom(ctx)
fmt.Fprintln(writer, cache.query)
fmt.Fprintln(writer, vals)
}
if len(cache.retMapping) != 0 {
err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
} else {
_, err = exec.ExecContext(ctx, cache.query, vals...)
}
if err != nil {
return errors.Wrap(err, "models: unable to insert into gorp_migrations")
}
if !cached {
gorpMigrationInsertCacheMut.Lock()
gorpMigrationInsertCache[key] = cache
gorpMigrationInsertCacheMut.Unlock()
}
return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the GorpMigration.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *GorpMigration) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
var err error
if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
return 0, err
}
key := makeCacheKey(columns, nil)
gorpMigrationUpdateCacheMut.RLock()
cache, cached := gorpMigrationUpdateCache[key]
gorpMigrationUpdateCacheMut.RUnlock()
if !cached {
wl := columns.UpdateColumnSet(
gorpMigrationAllColumns,
gorpMigrationPrimaryKeyColumns,
)
if !columns.IsWhitelist() {
wl = strmangle.SetComplement(wl, []string{"created_at"})
}
if len(wl) == 0 {
return 0, errors.New("models: unable to update gorp_migrations, could not build whitelist")
}
cache.query = fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 0, wl),
strmangle.WhereClause("\"", "\"", 0, gorpMigrationPrimaryKeyColumns),
)
cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, append(wl, gorpMigrationPrimaryKeyColumns...))
if err != nil {
return 0, err
}
}
values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
if boil.IsDebug(ctx) {
writer := boil.DebugWriterFrom(ctx)
fmt.Fprintln(writer, cache.query)
fmt.Fprintln(writer, values)
}
var result sql.Result
result, err = exec.ExecContext(ctx, cache.query, values...)
if err != nil {
return 0, errors.Wrap(err, "models: unable to update gorp_migrations row")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "models: failed to get rows affected by update for gorp_migrations")
}
if !cached {
gorpMigrationUpdateCacheMut.Lock()
gorpMigrationUpdateCache[key] = cache
gorpMigrationUpdateCacheMut.Unlock()
}
return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
func (q gorpMigrationQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
queries.SetUpdate(q.Query, cols)
result, err := q.Query.ExecContext(ctx, exec)
if err != nil {
return 0, errors.Wrap(err, "models: unable to update all for gorp_migrations")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "models: unable to retrieve rows affected for gorp_migrations")
}
return rowsAff, nil
}
// UpdateAll updates all rows with the specified column values, using an executor.
func (o GorpMigrationSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
ln := int64(len(o))
if ln == 0 {
return 0, nil
}
if len(cols) == 0 {
return 0, errors.New("models: update all requires at least one column argument")
}
colNames := make([]string, len(cols))
args := make([]interface{}, len(cols))
i := 0
for name, value := range cols {
colNames[i] = name
args[i] = value
i++
}
// Append the primary key values for each row
for _, obj := range o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 0, colNames),
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o)))
if boil.IsDebug(ctx) {
writer := boil.DebugWriterFrom(ctx)
fmt.Fprintln(writer, sql)
fmt.Fprintln(writer, args...)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "models: unable to update all in gorpMigration slice")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all gorpMigration")
}
return rowsAff, nil
}
// Delete deletes a single GorpMigration record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *GorpMigration) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if o == nil {
return 0, errors.New("models: no GorpMigration provided for delete")
}
if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
return 0, err
}
args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), gorpMigrationPrimaryKeyMapping)
sql := "DELETE FROM \"gorp_migrations\" WHERE \"id\"=?"
if boil.IsDebug(ctx) {
writer := boil.DebugWriterFrom(ctx)
fmt.Fprintln(writer, sql)
fmt.Fprintln(writer, args...)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "models: unable to delete from gorp_migrations")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "models: failed to get rows affected by delete for gorp_migrations")
}
if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
return 0, err
}
return rowsAff, nil
}
// DeleteAll deletes all matching rows.
func (q gorpMigrationQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if q.Query == nil {
return 0, errors.New("models: no gorpMigrationQuery provided for delete all")
}
queries.SetDelete(q.Query)
result, err := q.Query.ExecContext(ctx, exec)
if err != nil {
return 0, errors.Wrap(err, "models: unable to delete all from gorp_migrations")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations")
}
return rowsAff, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
func (o GorpMigrationSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if len(o) == 0 {
return 0, nil
}
if len(gorpMigrationBeforeDeleteHooks) != 0 {
for _, obj := range o {
if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
return 0, err
}
}
}
var args []interface{}
for _, obj := range o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := "DELETE FROM \"gorp_migrations\" WHERE " +
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o))
if boil.IsDebug(ctx) {
writer := boil.DebugWriterFrom(ctx)
fmt.Fprintln(writer, sql)
fmt.Fprintln(writer, args)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "models: unable to delete all from gorpMigration slice")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations")
}
if len(gorpMigrationAfterDeleteHooks) != 0 {
for _, obj := range o {
if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
return 0, err
}
}
}
return rowsAff, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *GorpMigration) Reload(ctx context.Context, exec boil.ContextExecutor) error {
ret, err := FindGorpMigration(ctx, exec, o.ID)
if err != nil {
return err
}
*o = *ret
return nil
}
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *GorpMigrationSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
if o == nil || len(*o) == 0 {
return nil
}
slice := GorpMigrationSlice{}
var args []interface{}
for _, obj := range *o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := "SELECT \"gorp_migrations\".* FROM \"gorp_migrations\" WHERE " +
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(*o))
q := queries.Raw(sql, args...)
err := q.Bind(ctx, exec, &slice)
if err != nil {
return errors.Wrap(err, "models: unable to reload all in GorpMigrationSlice")
}
*o = slice
return nil
}
// GorpMigrationExists checks if the GorpMigration row exists.
func GorpMigrationExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
var exists bool
sql := "select exists(select 1 from \"gorp_migrations\" where \"id\"=? limit 1)"
if boil.IsDebug(ctx) {
writer := boil.DebugWriterFrom(ctx)
fmt.Fprintln(writer, sql)
fmt.Fprintln(writer, iD)
}
row := exec.QueryRowContext(ctx, sql, iD)
err := row.Scan(&exists)
if err != nil {
return false, errors.Wrap(err, "models: unable to check if gorp_migrations exists")
}
return exists, nil
}
// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
func (o *GorpMigration) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error {
if o == nil {
return errors.New("models: no gorp_migrations provided for upsert")
}
if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
return err
}
nzDefaults := queries.NonZeroDefaultSet(gorpMigrationColumnsWithDefault, o)
// Build the cache key inline; this is ugly but necessary due to mysql vs psql differences
buf := strmangle.GetBuffer()
if updateOnConflict {
buf.WriteByte('t')
} else {
buf.WriteByte('f')
}
buf.WriteByte('.')
for _, c := range conflictColumns {
buf.WriteString(c)
}
buf.WriteByte('.')
buf.WriteString(strconv.Itoa(updateColumns.Kind))
for _, c := range updateColumns.Cols {
buf.WriteString(c)
}
buf.WriteByte('.')
buf.WriteString(strconv.Itoa(insertColumns.Kind))
for _, c := range insertColumns.Cols {
buf.WriteString(c)
}
buf.WriteByte('.')
for _, c := range nzDefaults {
buf.WriteString(c)
}
key := buf.String()
strmangle.PutBuffer(buf)
gorpMigrationUpsertCacheMut.RLock()
cache, cached := gorpMigrationUpsertCache[key]
gorpMigrationUpsertCacheMut.RUnlock()
var err error
if !cached {
insert, ret := insertColumns.InsertColumnSet(
gorpMigrationAllColumns,
gorpMigrationColumnsWithDefault,
gorpMigrationColumnsWithoutDefault,
nzDefaults,
)
update := updateColumns.UpdateColumnSet(
gorpMigrationAllColumns,
gorpMigrationPrimaryKeyColumns,
)
if updateOnConflict && len(update) == 0 {
return errors.New("models: unable to upsert gorp_migrations, could not build update column list")
}
conflict := conflictColumns
if len(conflict) == 0 {
conflict = make([]string, len(gorpMigrationPrimaryKeyColumns))
copy(conflict, gorpMigrationPrimaryKeyColumns)
}
cache.query = buildUpsertQuerySQLite(dialect, "\"gorp_migrations\"", updateOnConflict, ret, update, conflict, insert)
cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, insert)
if err != nil {
return err
}
if len(ret) != 0 {
cache.retMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, ret)
if err != nil {
return err
}
}
}
value := reflect.Indirect(reflect.ValueOf(o))
vals := queries.ValuesFromMapping(value, cache.valueMapping)
var returns []interface{}
if len(cache.retMapping) != 0 {
returns = queries.PtrsFromMapping(value, cache.retMapping)
}
if boil.IsDebug(ctx) {
writer := boil.DebugWriterFrom(ctx)
fmt.Fprintln(writer, cache.query)
fmt.Fprintln(writer, vals)
}
if len(cache.retMapping) != 0 {
err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
if err == sql.ErrNoRows {
err = nil // Postgres doesn't return anything when there's no update
}
} else {
_, err = exec.ExecContext(ctx, cache.query, vals...)
}
if err != nil {
return errors.Wrap(err, "models: unable to upsert gorp_migrations")
}
if !cached {
gorpMigrationUpsertCacheMut.Lock()
gorpMigrationUpsertCache[key] = cache
gorpMigrationUpsertCacheMut.Unlock()
}
return o.doAfterUpsertHooks(ctx, exec)
}
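Illustrative usage (not part of the diff): given the Upsert signature above, marking a hypothetical migration id as applied, and refreshing applied_at when the row already exists, could be written alongside the generated code as:

// exampleUpsert updates applied_at on conflict with the id column, otherwise
// inserting a new row; "0001_initial" is a made-up migration id.
func exampleUpsert(ctx context.Context, exec boil.ContextExecutor) error {
	m := &GorpMigration{ID: "0001_initial", AppliedAt: null.TimeFrom(time.Now())}
	return m.Upsert(ctx, exec, true, []string{"id"}, boil.Whitelist("applied_at"), boil.Infer())
}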


@@ -0,0 +1,731 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/volatiletech/randomize"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/strmangle"
)
func testGorpMigrationsUpsert(t *testing.T) {
t.Parallel()
if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
// Attempt the INSERT side of an UPSERT
o := GorpMigration{}
if err = randomize.Struct(seed, &o, gorpMigrationDBTypes, true); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert GorpMigration: %s", err)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Attempt the UPDATE side of an UPSERT
if err = randomize.Struct(seed, &o, gorpMigrationDBTypes, false, gorpMigrationPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert GorpMigration: %s", err)
}
count, err = GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
func testGorpMigrations(t *testing.T) {
t.Parallel()
query := GorpMigrations()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
func testGorpMigrationsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testGorpMigrationsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := GorpMigrations().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testGorpMigrationsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := GorpMigrationSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testGorpMigrationsExists(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
e, err := GorpMigrationExists(ctx, tx, o.ID)
if err != nil {
t.Errorf("Unable to check if GorpMigration exists: %s", err)
}
if !e {
t.Errorf("Expected GorpMigrationExists to return true, but got false.")
}
}
func testGorpMigrationsFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
gorpMigrationFound, err := FindGorpMigration(ctx, tx, o.ID)
if err != nil {
t.Error(err)
}
if gorpMigrationFound == nil {
t.Error("want a record, got nil")
}
}
func testGorpMigrationsBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = GorpMigrations().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
func testGorpMigrationsOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := GorpMigrations().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
func testGorpMigrationsAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
gorpMigrationOne := &GorpMigration{}
gorpMigrationTwo := &GorpMigration{}
if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := GorpMigrations().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 2 {
t.Error("want 2 records, got:", len(slice))
}
}
func testGorpMigrationsCount(t *testing.T) {
t.Parallel()
var err error
seed := randomize.NewSeed()
gorpMigrationOne := &GorpMigration{}
gorpMigrationTwo := &GorpMigration{}
if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 2 {
t.Error("want 2 records, got:", count)
}
}
func gorpMigrationBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func gorpMigrationAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
*o = GorpMigration{}
return nil
}
func testGorpMigrationsHooks(t *testing.T) {
t.Parallel()
var err error
ctx := context.Background()
empty := &GorpMigration{}
o := &GorpMigration{}
seed := randomize.NewSeed()
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, false); err != nil {
t.Errorf("Unable to randomize GorpMigration object: %s", err)
}
AddGorpMigrationHook(boil.BeforeInsertHook, gorpMigrationBeforeInsertHook)
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
}
gorpMigrationBeforeInsertHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.AfterInsertHook, gorpMigrationAfterInsertHook)
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
}
gorpMigrationAfterInsertHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.AfterSelectHook, gorpMigrationAfterSelectHook)
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
}
gorpMigrationAfterSelectHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.BeforeUpdateHook, gorpMigrationBeforeUpdateHook)
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
}
gorpMigrationBeforeUpdateHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.AfterUpdateHook, gorpMigrationAfterUpdateHook)
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
}
gorpMigrationAfterUpdateHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.BeforeDeleteHook, gorpMigrationBeforeDeleteHook)
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
}
gorpMigrationBeforeDeleteHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.AfterDeleteHook, gorpMigrationAfterDeleteHook)
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
}
gorpMigrationAfterDeleteHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.BeforeUpsertHook, gorpMigrationBeforeUpsertHook)
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
}
gorpMigrationBeforeUpsertHooks = []GorpMigrationHook{}
AddGorpMigrationHook(boil.AfterUpsertHook, gorpMigrationAfterUpsertHook)
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
}
gorpMigrationAfterUpsertHooks = []GorpMigrationHook{}
}
func testGorpMigrationsInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testGorpMigrationsInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(gorpMigrationColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testGorpMigrationsReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
func testGorpMigrationsReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := GorpMigrationSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
func testGorpMigrationsSelect(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := GorpMigrations().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 1 {
t.Error("want one record, got:", len(slice))
}
}
var (
gorpMigrationDBTypes = map[string]string{`ID`: `VARCHAR(255)`, `AppliedAt`: `DATETIME`}
_ = bytes.MinRead
)
func testGorpMigrationsUpdate(t *testing.T) {
t.Parallel()
if 0 == len(gorpMigrationPrimaryKeyColumns) {
t.Skip("Skipping table with no primary key columns")
}
if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only affect one row but affected", rowsAff)
}
}
func testGorpMigrationsSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &GorpMigration{}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := GorpMigrations().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(gorpMigrationAllColumns, gorpMigrationPrimaryKeyColumns) {
fields = gorpMigrationAllColumns
} else {
fields = strmangle.SetComplement(
gorpMigrationAllColumns,
gorpMigrationPrimaryKeyColumns,
)
}
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := GorpMigrationSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}

File diff suppressed because it is too large.


@@ -0,0 +1,731 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/volatiletech/randomize"
"github.com/volatiletech/sqlboiler/v4/boil"
"github.com/volatiletech/sqlboiler/v4/queries"
"github.com/volatiletech/strmangle"
)
func testHeadersUpsert(t *testing.T) {
t.Parallel()
if len(headerAllColumns) == len(headerPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
// Attempt the INSERT side of an UPSERT
o := Header{}
if err = randomize.Struct(seed, &o, headerDBTypes, true); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert Header: %s", err)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Attempt the UPDATE side of an UPSERT
if err = randomize.Struct(seed, &o, headerDBTypes, false, headerPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert Header: %s", err)
}
count, err = Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
func testHeaders(t *testing.T) {
t.Parallel()
query := Headers()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
func testHeadersDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testHeadersQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := Headers().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testHeadersSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := HeaderSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testHeadersExists(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
e, err := HeaderExists(ctx, tx, o.Name)
if err != nil {
t.Errorf("Unable to check if Header exists: %s", err)
}
if !e {
t.Errorf("Expected HeaderExists to return true, but got false.")
}
}
func testHeadersFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
headerFound, err := FindHeader(ctx, tx, o.Name)
if err != nil {
t.Error(err)
}
if headerFound == nil {
t.Error("want a record, got nil")
}
}
func testHeadersBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = Headers().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
func testHeadersOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := Headers().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
func testHeadersAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
headerOne := &Header{}
headerTwo := &Header{}
if err = randomize.Struct(seed, headerOne, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
if err = randomize.Struct(seed, headerTwo, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = headerOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = headerTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Headers().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 2 {
t.Error("want 2 records, got:", len(slice))
}
}
func testHeadersCount(t *testing.T) {
t.Parallel()
var err error
seed := randomize.NewSeed()
headerOne := &Header{}
headerTwo := &Header{}
if err = randomize.Struct(seed, headerOne, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
if err = randomize.Struct(seed, headerTwo, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = headerOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = headerTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 2 {
t.Error("want 2 records, got:", count)
}
}
func headerBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func headerAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
*o = Header{}
return nil
}
func testHeadersHooks(t *testing.T) {
t.Parallel()
var err error
ctx := context.Background()
empty := &Header{}
o := &Header{}
seed := randomize.NewSeed()
if err = randomize.Struct(seed, o, headerDBTypes, false); err != nil {
t.Errorf("Unable to randomize Header object: %s", err)
}
AddHeaderHook(boil.BeforeInsertHook, headerBeforeInsertHook)
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
}
headerBeforeInsertHooks = []HeaderHook{}
AddHeaderHook(boil.AfterInsertHook, headerAfterInsertHook)
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
}
headerAfterInsertHooks = []HeaderHook{}
AddHeaderHook(boil.AfterSelectHook, headerAfterSelectHook)
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
}
headerAfterSelectHooks = []HeaderHook{}
AddHeaderHook(boil.BeforeUpdateHook, headerBeforeUpdateHook)
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
}
headerBeforeUpdateHooks = []HeaderHook{}
AddHeaderHook(boil.AfterUpdateHook, headerAfterUpdateHook)
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
}
headerAfterUpdateHooks = []HeaderHook{}
AddHeaderHook(boil.BeforeDeleteHook, headerBeforeDeleteHook)
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
}
headerBeforeDeleteHooks = []HeaderHook{}
AddHeaderHook(boil.AfterDeleteHook, headerAfterDeleteHook)
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
}
headerAfterDeleteHooks = []HeaderHook{}
AddHeaderHook(boil.BeforeUpsertHook, headerBeforeUpsertHook)
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
}
headerBeforeUpsertHooks = []HeaderHook{}
AddHeaderHook(boil.AfterUpsertHook, headerAfterUpsertHook)
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
}
headerAfterUpsertHooks = []HeaderHook{}
}
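// By contrast with the zeroing hooks used by the hook tests above, a real hook
// typically inspects or adjusts the row before it is written. A hypothetical
// sketch, not part of the generated code; it assumes Header.Name is a plain
// string field, as the TEXT primary key column suggests.
func registerHeaderDefaultNameHook() {
	AddHeaderHook(boil.BeforeInsertHook, func(ctx context.Context, exec boil.ContextExecutor, h *Header) error {
		if h.Name == "" {
			h.Name = "unnamed" // illustration only: give the primary key a fallback value
		}
		return nil
	})
}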
func testHeadersInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testHeadersInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(headerColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testHeadersReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
func testHeadersReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := HeaderSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
func testHeadersSelect(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Headers().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 1 {
t.Error("want one record, got:", len(slice))
}
}
var (
headerDBTypes = map[string]string{`Record`: `INTEGER`, `Lastknownrecord`: `INTEGER`, `Block`: `INTEGER`, `Lastknownblock`: `INTEGER`, `Deleted`: `INTEGER`, `Typeflag`: `INTEGER`, `Name`: `TEXT`, `Linkname`: `TEXT`, `Size`: `INTEGER`, `Mode`: `INTEGER`, `UID`: `INTEGER`, `Gid`: `INTEGER`, `Uname`: `TEXT`, `Gname`: `TEXT`, `Modtime`: `DATE`, `Accesstime`: `DATE`, `Changetime`: `DATE`, `Devmajor`: `INTEGER`, `Devminor`: `INTEGER`, `Paxrecords`: `TEXT`, `Format`: `INTEGER`}
_ = bytes.MinRead
)
func testHeadersUpdate(t *testing.T) {
t.Parallel()
if 0 == len(headerPrimaryKeyColumns) {
t.Skip("Skipping table with no primary key columns")
}
if len(headerAllColumns) == len(headerPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only affect one row but affected", rowsAff)
}
}
func testHeadersSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(headerAllColumns) == len(headerPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Header{}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Headers().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Header struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(headerAllColumns, headerPrimaryKeyColumns) {
fields = headerAllColumns
} else {
fields = strmangle.SetComplement(
headerAllColumns,
headerPrimaryKeyColumns,
)
}
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := HeaderSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}

View File

@@ -0,0 +1,93 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"database/sql"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"path/filepath"
"regexp"
_ "github.com/mattn/go-sqlite3"
"github.com/pkg/errors"
"github.com/spf13/viper"
)
var rgxSQLitekey = regexp.MustCompile(`(?mi)((,\n)?\s+foreign key.*?\n)+`)
type sqliteTester struct {
dbConn *sql.DB
dbName string
testDBName string
}
func init() {
dbMain = &sqliteTester{}
}
func (s *sqliteTester) setup() error {
var err error
s.dbName = viper.GetString("sqlite3.dbname")
if len(s.dbName) == 0 {
return errors.New("no dbname specified")
}
s.testDBName = filepath.Join(os.TempDir(), fmt.Sprintf("boil-sqlite3-%d.sql", rand.Int()))
// Copy the development database into the throwaway test database: dump the
// schema and data with the sqlite3 CLI, strip FOREIGN KEY clauses on the way
// through the pipe, and replay the result into the temporary file.
dumpCmd := exec.Command("sqlite3", "-cmd", ".dump", s.dbName)
createCmd := exec.Command("sqlite3", s.testDBName)
r, w := io.Pipe()
dumpCmd.Stdout = w
createCmd.Stdin = newFKeyDestroyer(rgxSQLitekey, r)
if err = dumpCmd.Start(); err != nil {
return errors.Wrap(err, "failed to start sqlite3 dump command")
}
if err = createCmd.Start(); err != nil {
return errors.Wrap(err, "failed to start sqlite3 create command")
}
if err = dumpCmd.Wait(); err != nil {
fmt.Println(err)
return errors.Wrap(err, "failed to wait for sqlite3 dump command")
}
w.Close() // After dumpCmd is done, close the write end of the pipe
if err = createCmd.Wait(); err != nil {
fmt.Println(err)
return errors.Wrap(err, "failed to wait for sqlite3 create command")
}
return nil
}
func (s *sqliteTester) teardown() error {
if s.dbConn != nil {
s.dbConn.Close()
}
return os.Remove(s.testDBName)
}
func (s *sqliteTester) conn() (*sql.DB, error) {
if s.dbConn != nil {
return s.dbConn, nil
}
var err error
s.dbConn, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?_loc=UTC", s.testDBName))
if err != nil {
return nil, err
}
return s.dbConn, nil
}
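The setup above relies on rgxSQLitekey (applied through the newFKeyDestroyer reader defined alongside the other generated test helpers) to drop FOREIGN KEY clauses from the dumped schema before it is replayed. A small, self-contained sketch of what that stripping does to a made-up table definition:

// exampleStripForeignKeys is an illustration only; the "child" table is not one
// of the generated models. Applying rgxSQLitekey removes the FOREIGN KEY clause
// together with its leading comma, leaving a valid constraint-free statement.
func exampleStripForeignKeys() string {
	dump := "CREATE TABLE child (\n" +
		"  id INTEGER PRIMARY KEY,\n" +
		"  parent_id INTEGER,\n" +
		"  FOREIGN KEY (parent_id) REFERENCES parent(id)\n" +
		");"
	// Roughly: "CREATE TABLE child (\n  id INTEGER PRIMARY KEY,\n  parent_id INTEGER);"
	return rgxSQLitekey.ReplaceAllString(dump, "")
}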

View File

@@ -0,0 +1,12 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import "testing"
func TestUpsert(t *testing.T) {
t.Run("GorpMigrations", testGorpMigrationsUpsert)
t.Run("Headers", testHeadersUpsert)
}

View File

@@ -0,0 +1,61 @@
// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package models
import (
"fmt"
"strings"
"github.com/volatiletech/sqlboiler/v4/drivers"
"github.com/volatiletech/strmangle"
)
// buildUpsertQuerySQLite builds an INSERT ... ON CONFLICT statement string from the dialect, table name, and column lists provided.
func buildUpsertQuerySQLite(dia drivers.Dialect, tableName string, updateOnConflict bool, ret, update, conflict, whitelist []string) string {
conflict = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, conflict)
whitelist = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, whitelist)
ret = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, ret)
buf := strmangle.GetBuffer()
defer strmangle.PutBuffer(buf)
columns := "DEFAULT VALUES"
if len(whitelist) != 0 {
columns = fmt.Sprintf("(%s) VALUES (%s)",
strings.Join(whitelist, ", "),
strmangle.Placeholders(dia.UseIndexPlaceholders, len(whitelist), 1, 1))
}
fmt.Fprintf(
buf,
"INSERT INTO %s %s ON CONFLICT ",
tableName,
columns,
)
if !updateOnConflict || len(update) == 0 {
buf.WriteString("DO NOTHING")
} else {
buf.WriteByte('(')
buf.WriteString(strings.Join(conflict, ", "))
buf.WriteString(") DO UPDATE SET ")
for i, v := range update {
if i != 0 {
buf.WriteByte(',')
}
quoted := strmangle.IdentQuote(dia.LQ, dia.RQ, v)
buf.WriteString(quoted)
buf.WriteString(" = EXCLUDED.")
buf.WriteString(quoted)
}
}
if len(ret) != 0 {
buf.WriteString(" RETURNING ")
buf.WriteString(strings.Join(ret, ", "))
}
return buf.String()
}
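For reference, a minimal sketch of the kind of statement this helper assembles for the headers table, using the package-level SQLite dialect. The exact placeholder spacing comes from strmangle.Placeholders, so treat the commented output as approximate.

// sketchHeadersUpsertSQL is illustrative only and is not called by the
// generated code. With updateOnConflict=false (or an empty update list) the
// statement ends in ON CONFLICT DO NOTHING instead.
func sketchHeadersUpsertSQL() string {
	// Roughly: INSERT INTO "headers" ("name", "size") VALUES (?,?)
	//          ON CONFLICT ("name") DO UPDATE SET "size" = EXCLUDED."size"
	return buildUpsertQuerySQLite(
		dialect,     // package-level SQLite dialect (double-quote identifier quoting)
		`"headers"`, // table name, already quoted by callers such as Header.Upsert
		true,        // update on conflict
		nil,         // no RETURNING columns
		[]string{"size"},         // columns to update when the conflict target matches
		[]string{"name"},         // conflict target (the primary key)
		[]string{"name", "size"}, // insert whitelist
	)
}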