refactor: Decompose syscalls into pkg, start adding utility commands
This commit is contained in:
@@ -1,212 +0,0 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.14.0
|
||||
// source: metadata.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Action int32
|
||||
|
||||
const (
|
||||
Action_CREATE Action = 0
|
||||
Action_UPDATE Action = 1
|
||||
Action_DELETE Action = 2
|
||||
)
|
||||
|
||||
// Enum value maps for Action.
|
||||
var (
|
||||
Action_name = map[int32]string{
|
||||
0: "CREATE",
|
||||
1: "UPDATE",
|
||||
2: "DELETE",
|
||||
}
|
||||
Action_value = map[string]int32{
|
||||
"CREATE": 0,
|
||||
"UPDATE": 1,
|
||||
"DELETE": 2,
|
||||
}
|
||||
)
|
||||
|
||||
func (x Action) Enum() *Action {
|
||||
p := new(Action)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x Action) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (Action) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_metadata_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (Action) Type() protoreflect.EnumType {
|
||||
return &file_metadata_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x Action) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Action.Descriptor instead.
|
||||
func (Action) EnumDescriptor() ([]byte, []int) {
|
||||
return file_metadata_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
type Header struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Action Action `protobuf:"varint,1,opt,name=Action,proto3,enum=com.pojtinger.felicitas.stfs.Action" json:"Action,omitempty"`
|
||||
Replaces string `protobuf:"bytes,2,opt,name=Replaces,proto3" json:"Replaces,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Header) Reset() {
|
||||
*x = Header{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_metadata_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Header) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Header) ProtoMessage() {}
|
||||
|
||||
func (x *Header) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_metadata_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Header.ProtoReflect.Descriptor instead.
|
||||
func (*Header) Descriptor() ([]byte, []int) {
|
||||
return file_metadata_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Header) GetAction() Action {
|
||||
if x != nil {
|
||||
return x.Action
|
||||
}
|
||||
return Action_CREATE
|
||||
}
|
||||
|
||||
func (x *Header) GetReplaces() string {
|
||||
if x != nil {
|
||||
return x.Replaces
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_metadata_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_metadata_proto_rawDesc = []byte{
|
||||
0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6a, 0x74, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x2e,
|
||||
0x66, 0x65, 0x6c, 0x69, 0x78, 0x2e, 0x73, 0x74, 0x66, 0x73, 0x22, 0x5e, 0x0a, 0x06, 0x48, 0x65,
|
||||
0x61, 0x64, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6a, 0x74, 0x69,
|
||||
0x6e, 0x67, 0x65, 0x72, 0x2e, 0x66, 0x65, 0x6c, 0x69, 0x78, 0x2e, 0x73, 0x74, 0x66, 0x73, 0x2e,
|
||||
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a,
|
||||
0x0a, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x2a, 0x2c, 0x0a, 0x06, 0x41, 0x63,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x00,
|
||||
0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06,
|
||||
0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x6f, 0x6a, 0x6e, 0x74, 0x66, 0x78, 0x2f, 0x73,
|
||||
0x74, 0x66, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_metadata_proto_rawDescOnce sync.Once
|
||||
file_metadata_proto_rawDescData = file_metadata_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_metadata_proto_rawDescGZIP() []byte {
|
||||
file_metadata_proto_rawDescOnce.Do(func() {
|
||||
file_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_metadata_proto_rawDescData)
|
||||
})
|
||||
return file_metadata_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_metadata_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_metadata_proto_goTypes = []interface{}{
|
||||
(Action)(0), // 0: com.pojtinger.felicitas.stfs.Action
|
||||
(*Header)(nil), // 1: com.pojtinger.felicitas.stfs.Header
|
||||
}
|
||||
var file_metadata_proto_depIdxs = []int32{
|
||||
0, // 0: com.pojtinger.felicitas.stfs.Header.Action:type_name -> com.pojtinger.felicitas.stfs.Action
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_metadata_proto_init() }
|
||||
func file_metadata_proto_init() {
|
||||
if File_metadata_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Header); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_metadata_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_metadata_proto_goTypes,
|
||||
DependencyIndexes: file_metadata_proto_depIdxs,
|
||||
EnumInfos: file_metadata_proto_enumTypes,
|
||||
MessageInfos: file_metadata_proto_msgTypes,
|
||||
}.Build()
|
||||
File_metadata_proto = out.File
|
||||
file_metadata_proto_rawDesc = nil
|
||||
file_metadata_proto_goTypes = nil
|
||||
file_metadata_proto_depIdxs = nil
|
||||
}
|
||||
98
pkg/controllers/tape.go
Normal file
98
pkg/controllers/tape.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Tape ioctl request numbers and operation codes, mirroring the Linux
// mtio.h definitions.
// See https://github.com/benmcclelland/mtio
const (
	mtioCpos = 0x80086d03 // Get tape position (MTIOCPOS request number)
	mtioCtop = 0x40086d01 // Do magnetic tape operation (MTIOCTOP request number)

	mtFsf = 1 // Forward space over FileMark, position at first record of next file
	mtEom = 12 // Goto end of recorded media (for appending files)
	mtSeek = 22 // Seek to block

	// BlockSize is the tape record size in bytes used by callers of this package.
	BlockSize = 512
)
|
||||
|
||||
// position is the struct for the MTIOCPOS ioctl; the kernel fills it in
// with the drive's current position. Its layout must match the kernel's
// struct mtpos, so the field must stay int64 — do not reorder or resize.
type position struct {
	blkNo int64 // Current block number
}
||||
|
||||
// operation is the struct for the MTIOCTOP ioctl, describing one magnetic
// tape operation. Its layout must match the kernel's struct mtop
// (int16 op, int16 padding, int32 count) — do not reorder or resize fields.
type operation struct {
	op int16 // Operation ID
	pad int16 // Padding to match C structures
	count int32 // Operation count
}
||||
|
||||
func GoToEndOfTape(f *os.File) error {
|
||||
if _, _, err := syscall.Syscall(
|
||||
syscall.SYS_IOCTL,
|
||||
f.Fd(),
|
||||
mtioCtop,
|
||||
uintptr(unsafe.Pointer(
|
||||
&operation{
|
||||
op: mtEom,
|
||||
},
|
||||
)),
|
||||
); err != 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func GoToNextFileOnTape(f *os.File) error {
|
||||
if _, _, err := syscall.Syscall(
|
||||
syscall.SYS_IOCTL,
|
||||
f.Fd(),
|
||||
mtioCtop,
|
||||
uintptr(unsafe.Pointer(
|
||||
&operation{
|
||||
op: mtFsf,
|
||||
count: 1,
|
||||
},
|
||||
)),
|
||||
); err != 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetCurrentRecordFromTape(f *os.File) (int64, error) {
|
||||
pos := &position{}
|
||||
if _, _, err := syscall.Syscall(
|
||||
syscall.SYS_IOCTL,
|
||||
f.Fd(),
|
||||
mtioCpos,
|
||||
uintptr(unsafe.Pointer(pos)),
|
||||
); err != 0 {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return pos.blkNo, nil
|
||||
}
|
||||
|
||||
func SeekToRecordOnTape(f *os.File, record int32) error {
|
||||
if _, _, err := syscall.Syscall(
|
||||
syscall.SYS_IOCTL,
|
||||
f.Fd(),
|
||||
mtioCtop,
|
||||
uintptr(unsafe.Pointer(
|
||||
&operation{
|
||||
op: mtSeek,
|
||||
count: record,
|
||||
},
|
||||
)),
|
||||
); err != 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,286 +0,0 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// ../../db/sqlite/migrations/metadata/1636892915.sql
|
||||
|
||||
package metadata
|
||||
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info fileInfoEx
|
||||
}
|
||||
|
||||
type fileInfoEx interface {
|
||||
os.FileInfo
|
||||
MD5Checksum() string
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
md5checksum string
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) MD5Checksum() string {
|
||||
return fi.md5checksum
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var _bindataDbSqliteMigrationsMetadata1636892915Sql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x55\xc1\x72\xdb\x36\x10\x3d\x93\x5f\xb1\xb7\x5a\x53\x49\x3f\x90\x93\x1b\x4f\xd3\xcc\xc4\x9e\x8c\x6d\x4d\x7b\x05\x89\x25\xb5\x35\x88\x65\x17\x4b\xbb\xcc\xd7\x77\x16\x20\x13\x29\x95\x3a\xd3\x9c\x08\x02\xef\x3d\x3c\x00\x6f\x81\xdd\x0e\x7e\x1e\xa8\x17\xa7\x08\x87\xb1\x6e\x05\xad\xa5\xae\x09\x08\x47\x74\x1e\x25\xc1\x4d\x5d\xed\x76\xf0\x3c\x8f\xd8\x05\xd7\x03\x25\xd0\x23\x82\xce\x23\x02\x77\x0b\x0a\x30\xaa\xcc\xfb\x82\x3c\x22\x7c\x41\x61\x78\x75\x61\x42\xc3\xbb\x49\x79\x70\x4a\xad\x0b\x61\x86\x51\x78\x60\x45\x0f\xca\x80\xa4\x47\x94\x2c\xfe\x88\x3d\x70\x69\xde\x91\x64\x25\x8f\x23\x46\x4f\xb1\x07\x8e\x79\xd2\x51\x30\x61\x6c\xf3\xc4\x0e\x54\x1c\x05\x1b\x4d\xc1\xa5\x23\x50\x84\x07\x37\xe0\xbe\xae\xf4\xab\xd7\xa8\xd8\xa3\x40\x64\x85\x38\x85\xb0\xcd\xb2\x86\x32\x85\x8e\x02\x16\xe3\x75\x15\xad\x4f\xf1\x6f\xfd\x8a\x85\x51\x68\x70\x32\xc3\x0b\xce\x85\xf7\xec\xa4\x47\x85\xb8\xd0\x03\xc5\x17\xb8\x79\x75\x81\x3c\x74\x8b\xf3\x4f\xd6\xb7\xb4\x9f\xe6\xc1\x20\x9b\xba\xb2\xcf\xbf\x27\x28\xa2\x9f\xb8\xb7\x7d\x29\x66\x12\x7d\x41\x5b\x47\x33\x2b\xa6\xba\x5a\x7e\x2f\xad\xe1\x33\xca\x40\x29\x11\x47\x70\xd1\xc3\xc0\x1e\xa1\x21\x4d\x75\x95\x9b\x97\x49\x87\x84\x02\x1f\xef\xcc\x3c\xbf\x45\x94\xba\x9a\xc8\x5f\xc1\x7e\x10\x9e\xc6\x73\x70\x7f\x15\x9c\x85\xd7\x7d\x59\xa5\xaf\xad\xb8\x28\x7f\x87\xee\xaf\xa1\x3f\x76\xf9\xe4\x7f\x65\x19\x9c\x5a\x98\xa6\x98\x46\x6c\xa9\x23\xf4\x5b\x1b\x8a\xf0\xbb\x90\xa2\xec\xf3\xe7\xb7\x12\x47\xe1\x29\xfa\x04\xf7\xec\x9f\x69\xc0\x2c\xa4\x9c\x85\x22\x3a\xc1\xa4\x90\xb0\xe5\xe8\xf3\xe6\x51\x1f\x59\xb0\xc4\xfa\xb6\x6d\x31\x25\x23\xe5\xa1\xf7\x47\x17\x7b\xcc\xbf\x1d\x61\xf0\x29\x47\xbc\xa4\x81\x61\x4a\x67\x04\x96\x13\xfc\x16\x8a\xcd\xf9\xd4\xbe\x4b\xf0\xf9\xf6\x0f\x03\x7e\x78\x38\xec\x4f\x65\xd2\xd4\xec\x16\x4b\x82\x89\xc3\xa4\xc4\xf1\x3f\x34\x0a\xf9\x9e\x3d\x75\xd4\x3a\x03\x83\xe6\x95\x0e\xec\xad\x01\xde\x0a\xf9\x7c\x2b\x8b\xd5\x8c\x83\x1b\xc1\xbf\x26\xb2\x55\x2f\x25\x
f8\xcd\x17\xa4\x69\x1c\x59\x74\x53\x57\x2e\x33\xae\xe9\x95\xc5\xfe\x1f\xbd\x36\x33\xae\xe9\xdd\xbb\x3f\x59\xc0\xe3\x2b\xb5\x08\x71\x1a\x1a\x94\xef\xeb\xeb\xfd\xd1\xc9\x5a\x5f\xbf\x04\x6e\xad\xba\x3c\xbe\x0e\x99\x79\x39\x9b\xf7\x14\x7f\x54\x35\x33\x2f\xab\x2e\x87\xb1\x46\xb1\x84\xa7\x2b\x9d\x5c\x22\xab\x4e\x96\xdb\xf1\x24\x34\x47\x4a\x16\xe2\x84\x0a\xcd\x0c\x8f\x65\xf8\xc1\x62\xef\x12\x38\x68\x30\xe9\x0e\xbb\x8e\x45\xa1\x9f\xec\xb4\x9c\x9e\x48\x97\x53\x7f\x22\xbb\x02\xad\xb7\xf0\x21\x50\x83\x92\xef\x56\x41\xe7\x13\x24\x1e\x6c\x6f\xe3\xae\xe5\x61\x0c\xe4\xa2\xe6\xeb\x25\x15\xeb\x94\xcb\x68\xe4\x94\xc8\xae\x79\xdb\x04\x35\x5b\xca\xd0\xac\x29\x3b\xc4\x97\xc8\x6f\xf1\x9b\xf1\xa5\x0c\xbb\x4b\x65\x08\x6f\x57\xaa\x90\x12\xd8\x95\x8f\xbe\x4c\x9c\x8b\x95\xd4\xf2\xbe\xec\x17\x49\xd2\x55\xf3\x86\xca\x1d\xcf\x62\x54\xee\xe0\xf0\xf4\x7c\xfb\xb8\xb5\x20\x6d\x2d\x46\x9b\xac\xd1\xba\x31\x3f\x4e\xdc\x01\xc6\x96\xf3\xe3\x90\xdd\x2f\x53\xde\x24\x5c\xd7\xb0\xd9\xd7\xd5\xea\xf7\xe2\x19\xda\x43\x25\xd8\xb2\xf8\xf5\x7d\x51\x37\x62\x5d\x2d\x7d\xd7\x49\xe5\x54\x7f\x4a\xd0\x58\x58\x60\x31\x5e\x68\x75\xb5\x76\x9e\xb3\xeb\xcd\xbb\xfa\xf4\xb5\xbd\xe3\xb7\x58\x7b\xe1\xf1\xfc\xb5\x7d\xf7\x4f\x00\x00\x00\xff\xff\xcb\xa0\x8d\xeb\x92\x07\x00\x00")
|
||||
|
||||
func bindataDbSqliteMigrationsMetadata1636892915SqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_bindataDbSqliteMigrationsMetadata1636892915Sql,
|
||||
"../../db/sqlite/migrations/metadata/1636892915.sql",
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
|
||||
func bindataDbSqliteMigrationsMetadata1636892915Sql() (*asset, error) {
|
||||
bytes, err := bindataDbSqliteMigrationsMetadata1636892915SqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{
|
||||
name: "../../db/sqlite/migrations/metadata/1636892915.sql",
|
||||
size: 1938,
|
||||
md5checksum: "",
|
||||
mode: os.FileMode(420),
|
||||
modTime: time.Unix(1636916488, 0),
|
||||
}
|
||||
|
||||
a := &asset{bytes: bytes, info: info}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
//
|
||||
func Asset(name string) ([]byte, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist}
|
||||
}
|
||||
|
||||
//
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
// nolint: deadcode
|
||||
//
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
//
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or could not be loaded.
|
||||
//
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist}
|
||||
}
|
||||
|
||||
//
|
||||
// AssetNames returns the names of the assets.
|
||||
// nolint: deadcode
|
||||
//
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
//
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
//
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"../../db/sqlite/migrations/metadata/1636892915.sql": bindataDbSqliteMigrationsMetadata1636892915Sql,
|
||||
}
|
||||
|
||||
//
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"}
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"}
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
//
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(cannonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, &os.PathError{
|
||||
Op: "open",
|
||||
Path: name,
|
||||
Err: os.ErrNotExist,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, &os.PathError{
|
||||
Op: "open",
|
||||
Path: name,
|
||||
Err: os.ErrNotExist,
|
||||
}
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{Func: nil, Children: map[string]*bintree{
|
||||
"..": {Func: nil, Children: map[string]*bintree{
|
||||
"..": {Func: nil, Children: map[string]*bintree{
|
||||
"db": {Func: nil, Children: map[string]*bintree{
|
||||
"sqlite": {Func: nil, Children: map[string]*bintree{
|
||||
"migrations": {Func: nil, Children: map[string]*bintree{
|
||||
"metadata": {Func: nil, Children: map[string]*bintree{
|
||||
"1636892915.sql": {Func: bindataDbSqliteMigrationsMetadata1636892915Sql, Children: map[string]*bintree{}},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
|
||||
}
|
||||
@@ -1,119 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/volatiletech/sqlboiler/v4/boil"
|
||||
)
|
||||
|
||||
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
|
||||
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")
|
||||
|
||||
const outputDirDepth = 7
|
||||
|
||||
var (
|
||||
dbMain tester
|
||||
)
|
||||
|
||||
type tester interface {
|
||||
setup() error
|
||||
conn() (*sql.DB, error)
|
||||
teardown() error
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if dbMain == nil {
|
||||
fmt.Println("no dbMain tester interface was ready")
|
||||
os.Exit(-1)
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
flag.Parse()
|
||||
|
||||
var err error
|
||||
|
||||
// Load configuration
|
||||
err = initViper()
|
||||
if err != nil {
|
||||
fmt.Println("unable to load config file")
|
||||
os.Exit(-2)
|
||||
}
|
||||
|
||||
// Set DebugMode so we can see generated sql statements
|
||||
boil.DebugMode = *flagDebugMode
|
||||
|
||||
if err = dbMain.setup(); err != nil {
|
||||
fmt.Println("Unable to execute setup:", err)
|
||||
os.Exit(-4)
|
||||
}
|
||||
|
||||
conn, err := dbMain.conn()
|
||||
if err != nil {
|
||||
fmt.Println("failed to get connection:", err)
|
||||
}
|
||||
|
||||
var code int
|
||||
boil.SetDB(conn)
|
||||
code = m.Run()
|
||||
|
||||
if err = dbMain.teardown(); err != nil {
|
||||
fmt.Println("Unable to execute teardown:", err)
|
||||
os.Exit(-5)
|
||||
}
|
||||
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func initViper() error {
|
||||
if flagConfigFile != nil && *flagConfigFile != "" {
|
||||
viper.SetConfigFile(*flagConfigFile)
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
viper.SetConfigName("sqlboiler")
|
||||
|
||||
configHome := os.Getenv("XDG_CONFIG_HOME")
|
||||
homePath := os.Getenv("HOME")
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
wd = strings.Repeat("../", outputDirDepth)
|
||||
} else {
|
||||
wd = wd + strings.Repeat("/..", outputDirDepth)
|
||||
}
|
||||
|
||||
configPaths := []string{wd}
|
||||
if len(configHome) > 0 {
|
||||
configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
|
||||
} else {
|
||||
configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
|
||||
}
|
||||
|
||||
for _, p := range configPaths {
|
||||
viper.AddConfigPath(p)
|
||||
}
|
||||
|
||||
// Ignore errors here, fall back to defaults and validation to provide errs
|
||||
_ = viper.ReadInConfig()
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/volatiletech/sqlboiler/v4/drivers"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries/qm"
|
||||
)
|
||||
|
||||
var dialect = drivers.Dialect{
|
||||
LQ: 0x22,
|
||||
RQ: 0x22,
|
||||
|
||||
UseIndexPlaceholders: false,
|
||||
UseLastInsertID: true,
|
||||
UseSchema: false,
|
||||
UseDefaultKeyword: true,
|
||||
UseAutoColumns: false,
|
||||
UseTopClause: false,
|
||||
UseOutputClause: false,
|
||||
UseCaseWhenExistsClause: false,
|
||||
}
|
||||
|
||||
// NewQuery initializes a new Query using the passed in QueryMods
|
||||
func NewQuery(mods ...qm.QueryMod) *queries.Query {
|
||||
q := &queries.Query{}
|
||||
queries.SetDialect(q, &dialect)
|
||||
qm.Apply(q, mods...)
|
||||
|
||||
return q
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"regexp"
|
||||
|
||||
"github.com/volatiletech/sqlboiler/v4/boil"
|
||||
)
|
||||
|
||||
var dbNameRand *rand.Rand
|
||||
|
||||
func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor {
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Cannot create a transactor: %s", err))
|
||||
}
|
||||
return transactor
|
||||
}
|
||||
|
||||
func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader {
|
||||
return &fKeyDestroyer{
|
||||
reader: reader,
|
||||
rgx: regex,
|
||||
}
|
||||
}
|
||||
|
||||
type fKeyDestroyer struct {
|
||||
reader io.Reader
|
||||
buf *bytes.Buffer
|
||||
rgx *regexp.Regexp
|
||||
}
|
||||
|
||||
func (f *fKeyDestroyer) Read(b []byte) (int, error) {
|
||||
if f.buf == nil {
|
||||
all, err := ioutil.ReadAll(f.reader)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1)
|
||||
all = f.rgx.ReplaceAll(all, []byte{})
|
||||
f.buf = bytes.NewBuffer(all)
|
||||
}
|
||||
|
||||
return f.buf.Read(b)
|
||||
}
|
||||
@@ -1,139 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import "testing"
|
||||
|
||||
// This test suite runs each operation test in parallel.
|
||||
// Example, if your database has 3 tables, the suite will run:
|
||||
// table1, table2 and table3 Delete in parallel
|
||||
// table1, table2 and table3 Insert in parallel, and so forth.
|
||||
// It does NOT run each operation group in parallel.
|
||||
// Separating the tests thusly grants avoidance of Postgres deadlocks.
|
||||
func TestParent(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrations)
|
||||
t.Run("Headers", testHeaders)
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsDelete)
|
||||
t.Run("Headers", testHeadersDelete)
|
||||
}
|
||||
|
||||
func TestQueryDeleteAll(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsQueryDeleteAll)
|
||||
t.Run("Headers", testHeadersQueryDeleteAll)
|
||||
}
|
||||
|
||||
func TestSliceDeleteAll(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsSliceDeleteAll)
|
||||
t.Run("Headers", testHeadersSliceDeleteAll)
|
||||
}
|
||||
|
||||
func TestExists(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsExists)
|
||||
t.Run("Headers", testHeadersExists)
|
||||
}
|
||||
|
||||
func TestFind(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsFind)
|
||||
t.Run("Headers", testHeadersFind)
|
||||
}
|
||||
|
||||
func TestBind(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsBind)
|
||||
t.Run("Headers", testHeadersBind)
|
||||
}
|
||||
|
||||
func TestOne(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsOne)
|
||||
t.Run("Headers", testHeadersOne)
|
||||
}
|
||||
|
||||
func TestAll(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsAll)
|
||||
t.Run("Headers", testHeadersAll)
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsCount)
|
||||
t.Run("Headers", testHeadersCount)
|
||||
}
|
||||
|
||||
func TestHooks(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsHooks)
|
||||
t.Run("Headers", testHeadersHooks)
|
||||
}
|
||||
|
||||
func TestInsert(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsInsert)
|
||||
t.Run("GorpMigrations", testGorpMigrationsInsertWhitelist)
|
||||
t.Run("Headers", testHeadersInsert)
|
||||
t.Run("Headers", testHeadersInsertWhitelist)
|
||||
}
|
||||
|
||||
// TestToOne tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestToOne(t *testing.T) {}
|
||||
|
||||
// TestOneToOne tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestOneToOne(t *testing.T) {}
|
||||
|
||||
// TestToMany tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestToMany(t *testing.T) {}
|
||||
|
||||
// TestToOneSet tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestToOneSet(t *testing.T) {}
|
||||
|
||||
// TestToOneRemove tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestToOneRemove(t *testing.T) {}
|
||||
|
||||
// TestOneToOneSet tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestOneToOneSet(t *testing.T) {}
|
||||
|
||||
// TestOneToOneRemove tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestOneToOneRemove(t *testing.T) {}
|
||||
|
||||
// TestToManyAdd tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestToManyAdd(t *testing.T) {}
|
||||
|
||||
// TestToManySet tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestToManySet(t *testing.T) {}
|
||||
|
||||
// TestToManyRemove tests cannot be run in parallel
|
||||
// or deadlocks can occur.
|
||||
func TestToManyRemove(t *testing.T) {}
|
||||
|
||||
func TestReload(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsReload)
|
||||
t.Run("Headers", testHeadersReload)
|
||||
}
|
||||
|
||||
func TestReloadAll(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsReloadAll)
|
||||
t.Run("Headers", testHeadersReloadAll)
|
||||
}
|
||||
|
||||
func TestSelect(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsSelect)
|
||||
t.Run("Headers", testHeadersSelect)
|
||||
}
|
||||
|
||||
func TestUpdate(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsUpdate)
|
||||
t.Run("Headers", testHeadersUpdate)
|
||||
}
|
||||
|
||||
func TestSliceUpdateAll(t *testing.T) {
|
||||
t.Run("GorpMigrations", testGorpMigrationsSliceUpdateAll)
|
||||
t.Run("Headers", testHeadersSliceUpdateAll)
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
var TableNames = struct {
|
||||
GorpMigrations string
|
||||
Headers string
|
||||
}{
|
||||
GorpMigrations: "gorp_migrations",
|
||||
Headers: "headers",
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/friendsofgo/errors"
|
||||
"github.com/volatiletech/sqlboiler/v4/boil"
|
||||
"github.com/volatiletech/strmangle"
|
||||
)
|
||||
|
||||
// M type is for providing columns and column values to UpdateAll.
|
||||
type M map[string]interface{}
|
||||
|
||||
// ErrSyncFail occurs during insert when the record could not be retrieved in
|
||||
// order to populate default value information. This usually happens when LastInsertId
|
||||
// fails or there was a primary key configuration that was not resolvable.
|
||||
var ErrSyncFail = errors.New("models: failed to synchronize data after insert")
|
||||
|
||||
type insertCache struct {
|
||||
query string
|
||||
retQuery string
|
||||
valueMapping []uint64
|
||||
retMapping []uint64
|
||||
}
|
||||
|
||||
type updateCache struct {
|
||||
query string
|
||||
valueMapping []uint64
|
||||
}
|
||||
|
||||
func makeCacheKey(cols boil.Columns, nzDefaults []string) string {
|
||||
buf := strmangle.GetBuffer()
|
||||
|
||||
buf.WriteString(strconv.Itoa(cols.Kind))
|
||||
for _, w := range cols.Cols {
|
||||
buf.WriteString(w)
|
||||
}
|
||||
|
||||
if len(nzDefaults) != 0 {
|
||||
buf.WriteByte('.')
|
||||
}
|
||||
for _, nz := range nzDefaults {
|
||||
buf.WriteString(nz)
|
||||
}
|
||||
|
||||
str := buf.String()
|
||||
strmangle.PutBuffer(buf)
|
||||
return str
|
||||
}
|
||||
@@ -1,828 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/friendsofgo/errors"
|
||||
"github.com/volatiletech/null/v8"
|
||||
"github.com/volatiletech/sqlboiler/v4/boil"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries/qm"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
|
||||
"github.com/volatiletech/strmangle"
|
||||
)
|
||||
|
||||
// GorpMigration is an object representing the database table.
|
||||
type GorpMigration struct {
|
||||
ID string `boil:"id" json:"id" toml:"id" yaml:"id"`
|
||||
AppliedAt null.Time `boil:"applied_at" json:"applied_at,omitempty" toml:"applied_at" yaml:"applied_at,omitempty"`
|
||||
|
||||
R *gorpMigrationR `boil:"-" json:"-" toml:"-" yaml:"-"`
|
||||
L gorpMigrationL `boil:"-" json:"-" toml:"-" yaml:"-"`
|
||||
}
|
||||
|
||||
var GorpMigrationColumns = struct {
|
||||
ID string
|
||||
AppliedAt string
|
||||
}{
|
||||
ID: "id",
|
||||
AppliedAt: "applied_at",
|
||||
}
|
||||
|
||||
var GorpMigrationTableColumns = struct {
|
||||
ID string
|
||||
AppliedAt string
|
||||
}{
|
||||
ID: "gorp_migrations.id",
|
||||
AppliedAt: "gorp_migrations.applied_at",
|
||||
}
|
||||
|
||||
// Generated where
|
||||
|
||||
type whereHelperstring struct{ field string }
|
||||
|
||||
func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
|
||||
func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
|
||||
func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
|
||||
func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
|
||||
func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
|
||||
func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
|
||||
func (w whereHelperstring) IN(slice []string) qm.QueryMod {
|
||||
values := make([]interface{}, 0, len(slice))
|
||||
for _, value := range slice {
|
||||
values = append(values, value)
|
||||
}
|
||||
return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
|
||||
}
|
||||
func (w whereHelperstring) NIN(slice []string) qm.QueryMod {
|
||||
values := make([]interface{}, 0, len(slice))
|
||||
for _, value := range slice {
|
||||
values = append(values, value)
|
||||
}
|
||||
return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
|
||||
}
|
||||
|
||||
type whereHelpernull_Time struct{ field string }
|
||||
|
||||
func (w whereHelpernull_Time) EQ(x null.Time) qm.QueryMod {
|
||||
return qmhelper.WhereNullEQ(w.field, false, x)
|
||||
}
|
||||
func (w whereHelpernull_Time) NEQ(x null.Time) qm.QueryMod {
|
||||
return qmhelper.WhereNullEQ(w.field, true, x)
|
||||
}
|
||||
func (w whereHelpernull_Time) LT(x null.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.LT, x)
|
||||
}
|
||||
func (w whereHelpernull_Time) LTE(x null.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.LTE, x)
|
||||
}
|
||||
func (w whereHelpernull_Time) GT(x null.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.GT, x)
|
||||
}
|
||||
func (w whereHelpernull_Time) GTE(x null.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.GTE, x)
|
||||
}
|
||||
|
||||
func (w whereHelpernull_Time) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
|
||||
func (w whereHelpernull_Time) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
|
||||
|
||||
var GorpMigrationWhere = struct {
|
||||
ID whereHelperstring
|
||||
AppliedAt whereHelpernull_Time
|
||||
}{
|
||||
ID: whereHelperstring{field: "\"gorp_migrations\".\"id\""},
|
||||
AppliedAt: whereHelpernull_Time{field: "\"gorp_migrations\".\"applied_at\""},
|
||||
}
|
||||
|
||||
// GorpMigrationRels is where relationship names are stored.
|
||||
var GorpMigrationRels = struct {
|
||||
}{}
|
||||
|
||||
// gorpMigrationR is where relationships are stored.
|
||||
type gorpMigrationR struct {
|
||||
}
|
||||
|
||||
// NewStruct creates a new relationship struct
|
||||
func (*gorpMigrationR) NewStruct() *gorpMigrationR {
|
||||
return &gorpMigrationR{}
|
||||
}
|
||||
|
||||
// gorpMigrationL is where Load methods for each relationship are stored.
|
||||
type gorpMigrationL struct{}
|
||||
|
||||
var (
|
||||
gorpMigrationAllColumns = []string{"id", "applied_at"}
|
||||
gorpMigrationColumnsWithoutDefault = []string{"id", "applied_at"}
|
||||
gorpMigrationColumnsWithDefault = []string{}
|
||||
gorpMigrationPrimaryKeyColumns = []string{"id"}
|
||||
)
|
||||
|
||||
type (
|
||||
// GorpMigrationSlice is an alias for a slice of pointers to GorpMigration.
|
||||
// This should almost always be used instead of []GorpMigration.
|
||||
GorpMigrationSlice []*GorpMigration
|
||||
// GorpMigrationHook is the signature for custom GorpMigration hook methods
|
||||
GorpMigrationHook func(context.Context, boil.ContextExecutor, *GorpMigration) error
|
||||
|
||||
gorpMigrationQuery struct {
|
||||
*queries.Query
|
||||
}
|
||||
)
|
||||
|
||||
// Cache for insert, update and upsert
|
||||
var (
|
||||
gorpMigrationType = reflect.TypeOf(&GorpMigration{})
|
||||
gorpMigrationMapping = queries.MakeStructMapping(gorpMigrationType)
|
||||
gorpMigrationPrimaryKeyMapping, _ = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, gorpMigrationPrimaryKeyColumns)
|
||||
gorpMigrationInsertCacheMut sync.RWMutex
|
||||
gorpMigrationInsertCache = make(map[string]insertCache)
|
||||
gorpMigrationUpdateCacheMut sync.RWMutex
|
||||
gorpMigrationUpdateCache = make(map[string]updateCache)
|
||||
gorpMigrationUpsertCacheMut sync.RWMutex
|
||||
gorpMigrationUpsertCache = make(map[string]insertCache)
|
||||
)
|
||||
|
||||
var (
|
||||
// Force time package dependency for automated UpdatedAt/CreatedAt.
|
||||
_ = time.Second
|
||||
// Force qmhelper dependency for where clause generation (which doesn't
|
||||
// always happen)
|
||||
_ = qmhelper.Where
|
||||
)
|
||||
|
||||
var gorpMigrationBeforeInsertHooks []GorpMigrationHook
|
||||
var gorpMigrationBeforeUpdateHooks []GorpMigrationHook
|
||||
var gorpMigrationBeforeDeleteHooks []GorpMigrationHook
|
||||
var gorpMigrationBeforeUpsertHooks []GorpMigrationHook
|
||||
|
||||
var gorpMigrationAfterInsertHooks []GorpMigrationHook
|
||||
var gorpMigrationAfterSelectHooks []GorpMigrationHook
|
||||
var gorpMigrationAfterUpdateHooks []GorpMigrationHook
|
||||
var gorpMigrationAfterDeleteHooks []GorpMigrationHook
|
||||
var gorpMigrationAfterUpsertHooks []GorpMigrationHook
|
||||
|
||||
// doBeforeInsertHooks executes all "before insert" hooks.
|
||||
func (o *GorpMigration) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationBeforeInsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doBeforeUpdateHooks executes all "before Update" hooks.
|
||||
func (o *GorpMigration) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationBeforeUpdateHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doBeforeDeleteHooks executes all "before Delete" hooks.
|
||||
func (o *GorpMigration) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationBeforeDeleteHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doBeforeUpsertHooks executes all "before Upsert" hooks.
|
||||
func (o *GorpMigration) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationBeforeUpsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterInsertHooks executes all "after Insert" hooks.
|
||||
func (o *GorpMigration) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationAfterInsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterSelectHooks executes all "after Select" hooks.
|
||||
func (o *GorpMigration) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationAfterSelectHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterUpdateHooks executes all "after Update" hooks.
|
||||
func (o *GorpMigration) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationAfterUpdateHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterDeleteHooks executes all "after Delete" hooks.
|
||||
func (o *GorpMigration) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationAfterDeleteHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterUpsertHooks executes all "after Upsert" hooks.
|
||||
func (o *GorpMigration) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range gorpMigrationAfterUpsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddGorpMigrationHook registers your hook function for all future operations.
|
||||
func AddGorpMigrationHook(hookPoint boil.HookPoint, gorpMigrationHook GorpMigrationHook) {
|
||||
switch hookPoint {
|
||||
case boil.BeforeInsertHook:
|
||||
gorpMigrationBeforeInsertHooks = append(gorpMigrationBeforeInsertHooks, gorpMigrationHook)
|
||||
case boil.BeforeUpdateHook:
|
||||
gorpMigrationBeforeUpdateHooks = append(gorpMigrationBeforeUpdateHooks, gorpMigrationHook)
|
||||
case boil.BeforeDeleteHook:
|
||||
gorpMigrationBeforeDeleteHooks = append(gorpMigrationBeforeDeleteHooks, gorpMigrationHook)
|
||||
case boil.BeforeUpsertHook:
|
||||
gorpMigrationBeforeUpsertHooks = append(gorpMigrationBeforeUpsertHooks, gorpMigrationHook)
|
||||
case boil.AfterInsertHook:
|
||||
gorpMigrationAfterInsertHooks = append(gorpMigrationAfterInsertHooks, gorpMigrationHook)
|
||||
case boil.AfterSelectHook:
|
||||
gorpMigrationAfterSelectHooks = append(gorpMigrationAfterSelectHooks, gorpMigrationHook)
|
||||
case boil.AfterUpdateHook:
|
||||
gorpMigrationAfterUpdateHooks = append(gorpMigrationAfterUpdateHooks, gorpMigrationHook)
|
||||
case boil.AfterDeleteHook:
|
||||
gorpMigrationAfterDeleteHooks = append(gorpMigrationAfterDeleteHooks, gorpMigrationHook)
|
||||
case boil.AfterUpsertHook:
|
||||
gorpMigrationAfterUpsertHooks = append(gorpMigrationAfterUpsertHooks, gorpMigrationHook)
|
||||
}
|
||||
}
|
||||
|
||||
// One returns a single gorpMigration record from the query.
|
||||
func (q gorpMigrationQuery) One(ctx context.Context, exec boil.ContextExecutor) (*GorpMigration, error) {
|
||||
o := &GorpMigration{}
|
||||
|
||||
queries.SetLimit(q.Query, 1)
|
||||
|
||||
err := q.Bind(ctx, exec, o)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == sql.ErrNoRows {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
return nil, errors.Wrap(err, "models: failed to execute a one query for gorp_migrations")
|
||||
}
|
||||
|
||||
if err := o.doAfterSelectHooks(ctx, exec); err != nil {
|
||||
return o, err
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// All returns all GorpMigration records from the query.
|
||||
func (q gorpMigrationQuery) All(ctx context.Context, exec boil.ContextExecutor) (GorpMigrationSlice, error) {
|
||||
var o []*GorpMigration
|
||||
|
||||
err := q.Bind(ctx, exec, &o)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "models: failed to assign all query results to GorpMigration slice")
|
||||
}
|
||||
|
||||
if len(gorpMigrationAfterSelectHooks) != 0 {
|
||||
for _, obj := range o {
|
||||
if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
|
||||
return o, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Count returns the count of all GorpMigration records in the query.
|
||||
func (q gorpMigrationQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
var count int64
|
||||
|
||||
queries.SetSelect(q.Query, nil)
|
||||
queries.SetCount(q.Query)
|
||||
|
||||
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to count gorp_migrations rows")
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// Exists checks if the row exists in the table.
|
||||
func (q gorpMigrationQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
|
||||
var count int64
|
||||
|
||||
queries.SetSelect(q.Query, nil)
|
||||
queries.SetCount(q.Query)
|
||||
queries.SetLimit(q.Query, 1)
|
||||
|
||||
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "models: failed to check if gorp_migrations exists")
|
||||
}
|
||||
|
||||
return count > 0, nil
|
||||
}
|
||||
|
||||
// GorpMigrations retrieves all the records using an executor.
|
||||
func GorpMigrations(mods ...qm.QueryMod) gorpMigrationQuery {
|
||||
mods = append(mods, qm.From("\"gorp_migrations\""))
|
||||
return gorpMigrationQuery{NewQuery(mods...)}
|
||||
}
|
||||
|
||||
// FindGorpMigration retrieves a single record by ID with an executor.
|
||||
// If selectCols is empty Find will return all columns.
|
||||
func FindGorpMigration(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*GorpMigration, error) {
|
||||
gorpMigrationObj := &GorpMigration{}
|
||||
|
||||
sel := "*"
|
||||
if len(selectCols) > 0 {
|
||||
sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
|
||||
}
|
||||
query := fmt.Sprintf(
|
||||
"select %s from \"gorp_migrations\" where \"id\"=?", sel,
|
||||
)
|
||||
|
||||
q := queries.Raw(query, iD)
|
||||
|
||||
err := q.Bind(ctx, exec, gorpMigrationObj)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == sql.ErrNoRows {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
return nil, errors.Wrap(err, "models: unable to select from gorp_migrations")
|
||||
}
|
||||
|
||||
if err = gorpMigrationObj.doAfterSelectHooks(ctx, exec); err != nil {
|
||||
return gorpMigrationObj, err
|
||||
}
|
||||
|
||||
return gorpMigrationObj, nil
|
||||
}
|
||||
|
||||
// Insert a single record using an executor.
|
||||
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
|
||||
func (o *GorpMigration) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
|
||||
if o == nil {
|
||||
return errors.New("models: no gorp_migrations provided for insertion")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nzDefaults := queries.NonZeroDefaultSet(gorpMigrationColumnsWithDefault, o)
|
||||
|
||||
key := makeCacheKey(columns, nzDefaults)
|
||||
gorpMigrationInsertCacheMut.RLock()
|
||||
cache, cached := gorpMigrationInsertCache[key]
|
||||
gorpMigrationInsertCacheMut.RUnlock()
|
||||
|
||||
if !cached {
|
||||
wl, returnColumns := columns.InsertColumnSet(
|
||||
gorpMigrationAllColumns,
|
||||
gorpMigrationColumnsWithDefault,
|
||||
gorpMigrationColumnsWithoutDefault,
|
||||
nzDefaults,
|
||||
)
|
||||
|
||||
cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, wl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cache.retMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, returnColumns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(wl) != 0 {
|
||||
cache.query = fmt.Sprintf("INSERT INTO \"gorp_migrations\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
|
||||
} else {
|
||||
cache.query = "INSERT INTO \"gorp_migrations\" %sDEFAULT VALUES%s"
|
||||
}
|
||||
|
||||
var queryOutput, queryReturning string
|
||||
|
||||
if len(cache.retMapping) != 0 {
|
||||
cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"gorp_migrations\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, gorpMigrationPrimaryKeyColumns))
|
||||
}
|
||||
|
||||
cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
|
||||
}
|
||||
|
||||
value := reflect.Indirect(reflect.ValueOf(o))
|
||||
vals := queries.ValuesFromMapping(value, cache.valueMapping)
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, cache.query)
|
||||
fmt.Fprintln(writer, vals)
|
||||
}
|
||||
_, err = exec.ExecContext(ctx, cache.query, vals...)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "models: unable to insert into gorp_migrations")
|
||||
}
|
||||
|
||||
var identifierCols []interface{}
|
||||
|
||||
if len(cache.retMapping) == 0 {
|
||||
goto CacheNoHooks
|
||||
}
|
||||
|
||||
identifierCols = []interface{}{
|
||||
o.ID,
|
||||
}
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, cache.retQuery)
|
||||
fmt.Fprintln(writer, identifierCols...)
|
||||
}
|
||||
err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "models: unable to populate default values for gorp_migrations")
|
||||
}
|
||||
|
||||
CacheNoHooks:
|
||||
if !cached {
|
||||
gorpMigrationInsertCacheMut.Lock()
|
||||
gorpMigrationInsertCache[key] = cache
|
||||
gorpMigrationInsertCacheMut.Unlock()
|
||||
}
|
||||
|
||||
return o.doAfterInsertHooks(ctx, exec)
|
||||
}
|
||||
|
||||
// Update uses an executor to update the GorpMigration.
|
||||
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
|
||||
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
|
||||
func (o *GorpMigration) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
|
||||
var err error
|
||||
if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
key := makeCacheKey(columns, nil)
|
||||
gorpMigrationUpdateCacheMut.RLock()
|
||||
cache, cached := gorpMigrationUpdateCache[key]
|
||||
gorpMigrationUpdateCacheMut.RUnlock()
|
||||
|
||||
if !cached {
|
||||
wl := columns.UpdateColumnSet(
|
||||
gorpMigrationAllColumns,
|
||||
gorpMigrationPrimaryKeyColumns,
|
||||
)
|
||||
|
||||
if !columns.IsWhitelist() {
|
||||
wl = strmangle.SetComplement(wl, []string{"created_at"})
|
||||
}
|
||||
if len(wl) == 0 {
|
||||
return 0, errors.New("models: unable to update gorp_migrations, could not build whitelist")
|
||||
}
|
||||
|
||||
cache.query = fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s",
|
||||
strmangle.SetParamNames("\"", "\"", 0, wl),
|
||||
strmangle.WhereClause("\"", "\"", 0, gorpMigrationPrimaryKeyColumns),
|
||||
)
|
||||
cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, append(wl, gorpMigrationPrimaryKeyColumns...))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, cache.query)
|
||||
fmt.Fprintln(writer, values)
|
||||
}
|
||||
var result sql.Result
|
||||
result, err = exec.ExecContext(ctx, cache.query, values...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to update gorp_migrations row")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by update for gorp_migrations")
|
||||
}
|
||||
|
||||
if !cached {
|
||||
gorpMigrationUpdateCacheMut.Lock()
|
||||
gorpMigrationUpdateCache[key] = cache
|
||||
gorpMigrationUpdateCacheMut.Unlock()
|
||||
}
|
||||
|
||||
return rowsAff, o.doAfterUpdateHooks(ctx, exec)
|
||||
}
|
||||
|
||||
// UpdateAll updates all rows with the specified column values.
|
||||
func (q gorpMigrationQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
|
||||
queries.SetUpdate(q.Query, cols)
|
||||
|
||||
result, err := q.Query.ExecContext(ctx, exec)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to update all for gorp_migrations")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to retrieve rows affected for gorp_migrations")
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// UpdateAll updates all rows with the specified column values, using an executor.
|
||||
func (o GorpMigrationSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
|
||||
ln := int64(len(o))
|
||||
if ln == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if len(cols) == 0 {
|
||||
return 0, errors.New("models: update all requires at least one column argument")
|
||||
}
|
||||
|
||||
colNames := make([]string, len(cols))
|
||||
args := make([]interface{}, len(cols))
|
||||
|
||||
i := 0
|
||||
for name, value := range cols {
|
||||
colNames[i] = name
|
||||
args[i] = value
|
||||
i++
|
||||
}
|
||||
|
||||
// Append all of the primary key values for each column
|
||||
for _, obj := range o {
|
||||
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
|
||||
args = append(args, pkeyArgs...)
|
||||
}
|
||||
|
||||
sql := fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s",
|
||||
strmangle.SetParamNames("\"", "\"", 0, colNames),
|
||||
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o)))
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, args...)
|
||||
}
|
||||
result, err := exec.ExecContext(ctx, sql, args...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to update all in gorpMigration slice")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all gorpMigration")
|
||||
}
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// Delete deletes a single GorpMigration record with an executor.
|
||||
// Delete will match against the primary key column to find the record to delete.
|
||||
func (o *GorpMigration) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
if o == nil {
|
||||
return 0, errors.New("models: no GorpMigration provided for delete")
|
||||
}
|
||||
|
||||
if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), gorpMigrationPrimaryKeyMapping)
|
||||
sql := "DELETE FROM \"gorp_migrations\" WHERE \"id\"=?"
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, args...)
|
||||
}
|
||||
result, err := exec.ExecContext(ctx, sql, args...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to delete from gorp_migrations")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by delete for gorp_migrations")
|
||||
}
|
||||
|
||||
if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// DeleteAll deletes all matching rows.
|
||||
func (q gorpMigrationQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
if q.Query == nil {
|
||||
return 0, errors.New("models: no gorpMigrationQuery provided for delete all")
|
||||
}
|
||||
|
||||
queries.SetDelete(q.Query)
|
||||
|
||||
result, err := q.Query.ExecContext(ctx, exec)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to delete all from gorp_migrations")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations")
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// DeleteAll deletes all rows in the slice, using an executor.
|
||||
func (o GorpMigrationSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
if len(o) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if len(gorpMigrationBeforeDeleteHooks) != 0 {
|
||||
for _, obj := range o {
|
||||
if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var args []interface{}
|
||||
for _, obj := range o {
|
||||
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
|
||||
args = append(args, pkeyArgs...)
|
||||
}
|
||||
|
||||
sql := "DELETE FROM \"gorp_migrations\" WHERE " +
|
||||
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o))
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, args)
|
||||
}
|
||||
result, err := exec.ExecContext(ctx, sql, args...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to delete all from gorpMigration slice")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations")
|
||||
}
|
||||
|
||||
if len(gorpMigrationAfterDeleteHooks) != 0 {
|
||||
for _, obj := range o {
|
||||
if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// Reload refetches the object from the database
|
||||
// using the primary keys with an executor.
|
||||
func (o *GorpMigration) Reload(ctx context.Context, exec boil.ContextExecutor) error {
|
||||
ret, err := FindGorpMigration(ctx, exec, o.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*o = *ret
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReloadAll refetches every row with matching primary key column values
|
||||
// and overwrites the original object slice with the newly updated slice.
|
||||
func (o *GorpMigrationSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
|
||||
if o == nil || len(*o) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
slice := GorpMigrationSlice{}
|
||||
var args []interface{}
|
||||
for _, obj := range *o {
|
||||
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping)
|
||||
args = append(args, pkeyArgs...)
|
||||
}
|
||||
|
||||
sql := "SELECT \"gorp_migrations\".* FROM \"gorp_migrations\" WHERE " +
|
||||
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(*o))
|
||||
|
||||
q := queries.Raw(sql, args...)
|
||||
|
||||
err := q.Bind(ctx, exec, &slice)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "models: unable to reload all in GorpMigrationSlice")
|
||||
}
|
||||
|
||||
*o = slice
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GorpMigrationExists checks if the GorpMigration row exists.
|
||||
func GorpMigrationExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
|
||||
var exists bool
|
||||
sql := "select exists(select 1 from \"gorp_migrations\" where \"id\"=? limit 1)"
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, iD)
|
||||
}
|
||||
row := exec.QueryRowContext(ctx, sql, iD)
|
||||
|
||||
err := row.Scan(&exists)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "models: unable to check if gorp_migrations exists")
|
||||
}
|
||||
|
||||
return exists, nil
|
||||
}
|
||||
@@ -1,684 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/volatiletech/randomize"
|
||||
"github.com/volatiletech/sqlboiler/v4/boil"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries"
|
||||
"github.com/volatiletech/strmangle"
|
||||
)
|
||||
|
||||
var (
|
||||
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
|
||||
// so force a package dependency in case they don't.
|
||||
_ = queries.Equal
|
||||
)
|
||||
|
||||
func testGorpMigrations(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
query := GorpMigrations()
|
||||
|
||||
if query.Query == nil {
|
||||
t.Error("expected a query, got nothing")
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if rowsAff, err := o.Delete(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only have deleted one row, but affected:", rowsAff)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Error("want zero records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsQueryDeleteAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if rowsAff, err := GorpMigrations().DeleteAll(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only have deleted one row, but affected:", rowsAff)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Error("want zero records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsSliceDeleteAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice := GorpMigrationSlice{o}
|
||||
|
||||
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only have deleted one row, but affected:", rowsAff)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Error("want zero records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsExists(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
e, err := GorpMigrationExists(ctx, tx, o.ID)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to check if GorpMigration exists: %s", err)
|
||||
}
|
||||
if !e {
|
||||
t.Errorf("Expected GorpMigrationExists to return true, but got false.")
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsFind(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
gorpMigrationFound, err := FindGorpMigration(ctx, tx, o.ID)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if gorpMigrationFound == nil {
|
||||
t.Error("want a record, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsBind(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err = GorpMigrations().Bind(ctx, tx, o); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsOne(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if x, err := GorpMigrations().One(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if x == nil {
|
||||
t.Error("expected to get a non nil record")
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
gorpMigrationOne := &GorpMigration{}
|
||||
gorpMigrationTwo := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice, err := GorpMigrations().All(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(slice) != 2 {
|
||||
t.Error("want 2 records, got:", len(slice))
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsCount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
seed := randomize.NewSeed()
|
||||
gorpMigrationOne := &GorpMigration{}
|
||||
gorpMigrationTwo := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 2 {
|
||||
t.Error("want 2 records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func gorpMigrationBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gorpMigrationAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error {
|
||||
*o = GorpMigration{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testGorpMigrationsHooks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
|
||||
ctx := context.Background()
|
||||
empty := &GorpMigration{}
|
||||
o := &GorpMigration{}
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, false); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration object: %s", err)
|
||||
}
|
||||
|
||||
AddGorpMigrationHook(boil.BeforeInsertHook, gorpMigrationBeforeInsertHook)
|
||||
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationBeforeInsertHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.AfterInsertHook, gorpMigrationAfterInsertHook)
|
||||
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationAfterInsertHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.AfterSelectHook, gorpMigrationAfterSelectHook)
|
||||
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationAfterSelectHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.BeforeUpdateHook, gorpMigrationBeforeUpdateHook)
|
||||
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationBeforeUpdateHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.AfterUpdateHook, gorpMigrationAfterUpdateHook)
|
||||
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationAfterUpdateHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.BeforeDeleteHook, gorpMigrationBeforeDeleteHook)
|
||||
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationBeforeDeleteHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.AfterDeleteHook, gorpMigrationAfterDeleteHook)
|
||||
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationAfterDeleteHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.BeforeUpsertHook, gorpMigrationBeforeUpsertHook)
|
||||
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationBeforeUpsertHooks = []GorpMigrationHook{}
|
||||
|
||||
AddGorpMigrationHook(boil.AfterUpsertHook, gorpMigrationAfterUpsertHook)
|
||||
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
gorpMigrationAfterUpsertHooks = []GorpMigrationHook{}
|
||||
}
|
||||
|
||||
func testGorpMigrationsInsert(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsInsertWhitelist(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Whitelist(gorpMigrationColumnsWithoutDefault...)); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsReload(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err = o.Reload(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsReloadAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice := GorpMigrationSlice{o}
|
||||
|
||||
if err = slice.ReloadAll(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsSelect(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice, err := GorpMigrations().All(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(slice) != 1 {
|
||||
t.Error("want one record, got:", len(slice))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
gorpMigrationDBTypes = map[string]string{`ID`: `VARCHAR(255)`, `AppliedAt`: `DATETIME`}
|
||||
_ = bytes.MinRead
|
||||
)
|
||||
|
||||
func testGorpMigrationsUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if 0 == len(gorpMigrationPrimaryKeyColumns) {
|
||||
t.Skip("Skipping table with no primary key columns")
|
||||
}
|
||||
if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) {
|
||||
t.Skip("Skipping table with only primary key columns")
|
||||
}
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only affect one row but affected", rowsAff)
|
||||
}
|
||||
}
|
||||
|
||||
func testGorpMigrationsSliceUpdateAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) {
|
||||
t.Skip("Skipping table with only primary key columns")
|
||||
}
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &GorpMigration{}
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := GorpMigrations().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
|
||||
if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil {
|
||||
t.Errorf("Unable to randomize GorpMigration struct: %s", err)
|
||||
}
|
||||
|
||||
// Remove Primary keys and unique columns from what we plan to update
|
||||
var fields []string
|
||||
if strmangle.StringSliceMatch(gorpMigrationAllColumns, gorpMigrationPrimaryKeyColumns) {
|
||||
fields = gorpMigrationAllColumns
|
||||
} else {
|
||||
fields = strmangle.SetComplement(
|
||||
gorpMigrationAllColumns,
|
||||
gorpMigrationPrimaryKeyColumns,
|
||||
)
|
||||
}
|
||||
|
||||
value := reflect.Indirect(reflect.ValueOf(o))
|
||||
typ := reflect.TypeOf(o).Elem()
|
||||
n := typ.NumField()
|
||||
|
||||
updateMap := M{}
|
||||
for _, col := range fields {
|
||||
for i := 0; i < n; i++ {
|
||||
f := typ.Field(i)
|
||||
if f.Tag.Get("boil") == col {
|
||||
updateMap[col] = value.Field(i).Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
slice := GorpMigrationSlice{o}
|
||||
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("wanted one record updated but got", rowsAff)
|
||||
}
|
||||
}
|
||||
@@ -1,929 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/friendsofgo/errors"
|
||||
"github.com/volatiletech/sqlboiler/v4/boil"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries/qm"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
|
||||
"github.com/volatiletech/strmangle"
|
||||
)
|
||||
|
||||
// Header is an object representing the database table.
|
||||
type Header struct {
|
||||
Typeflag int64 `boil:"typeflag" json:"typeflag" toml:"typeflag" yaml:"typeflag"`
|
||||
Name string `boil:"name" json:"name" toml:"name" yaml:"name"`
|
||||
Linkname string `boil:"linkname" json:"linkname" toml:"linkname" yaml:"linkname"`
|
||||
Size int64 `boil:"size" json:"size" toml:"size" yaml:"size"`
|
||||
Mode int64 `boil:"mode" json:"mode" toml:"mode" yaml:"mode"`
|
||||
UID int64 `boil:"uid" json:"uid" toml:"uid" yaml:"uid"`
|
||||
Gid int64 `boil:"gid" json:"gid" toml:"gid" yaml:"gid"`
|
||||
Uname string `boil:"uname" json:"uname" toml:"uname" yaml:"uname"`
|
||||
Gname string `boil:"gname" json:"gname" toml:"gname" yaml:"gname"`
|
||||
Modtime time.Time `boil:"modtime" json:"modtime" toml:"modtime" yaml:"modtime"`
|
||||
Accesstime time.Time `boil:"accesstime" json:"accesstime" toml:"accesstime" yaml:"accesstime"`
|
||||
Changetime time.Time `boil:"changetime" json:"changetime" toml:"changetime" yaml:"changetime"`
|
||||
Devmajor int64 `boil:"devmajor" json:"devmajor" toml:"devmajor" yaml:"devmajor"`
|
||||
Devminor int64 `boil:"devminor" json:"devminor" toml:"devminor" yaml:"devminor"`
|
||||
Format int64 `boil:"format" json:"format" toml:"format" yaml:"format"`
|
||||
Record int64 `boil:"record" json:"record" toml:"record" yaml:"record"`
|
||||
Block int64 `boil:"block" json:"block" toml:"block" yaml:"block"`
|
||||
|
||||
R *headerR `boil:"-" json:"-" toml:"-" yaml:"-"`
|
||||
L headerL `boil:"-" json:"-" toml:"-" yaml:"-"`
|
||||
}
|
||||
|
||||
var HeaderColumns = struct {
|
||||
Typeflag string
|
||||
Name string
|
||||
Linkname string
|
||||
Size string
|
||||
Mode string
|
||||
UID string
|
||||
Gid string
|
||||
Uname string
|
||||
Gname string
|
||||
Modtime string
|
||||
Accesstime string
|
||||
Changetime string
|
||||
Devmajor string
|
||||
Devminor string
|
||||
Format string
|
||||
Record string
|
||||
Block string
|
||||
}{
|
||||
Typeflag: "typeflag",
|
||||
Name: "name",
|
||||
Linkname: "linkname",
|
||||
Size: "size",
|
||||
Mode: "mode",
|
||||
UID: "uid",
|
||||
Gid: "gid",
|
||||
Uname: "uname",
|
||||
Gname: "gname",
|
||||
Modtime: "modtime",
|
||||
Accesstime: "accesstime",
|
||||
Changetime: "changetime",
|
||||
Devmajor: "devmajor",
|
||||
Devminor: "devminor",
|
||||
Format: "format",
|
||||
Record: "record",
|
||||
Block: "block",
|
||||
}
|
||||
|
||||
var HeaderTableColumns = struct {
|
||||
Typeflag string
|
||||
Name string
|
||||
Linkname string
|
||||
Size string
|
||||
Mode string
|
||||
UID string
|
||||
Gid string
|
||||
Uname string
|
||||
Gname string
|
||||
Modtime string
|
||||
Accesstime string
|
||||
Changetime string
|
||||
Devmajor string
|
||||
Devminor string
|
||||
Format string
|
||||
Record string
|
||||
Block string
|
||||
}{
|
||||
Typeflag: "headers.typeflag",
|
||||
Name: "headers.name",
|
||||
Linkname: "headers.linkname",
|
||||
Size: "headers.size",
|
||||
Mode: "headers.mode",
|
||||
UID: "headers.uid",
|
||||
Gid: "headers.gid",
|
||||
Uname: "headers.uname",
|
||||
Gname: "headers.gname",
|
||||
Modtime: "headers.modtime",
|
||||
Accesstime: "headers.accesstime",
|
||||
Changetime: "headers.changetime",
|
||||
Devmajor: "headers.devmajor",
|
||||
Devminor: "headers.devminor",
|
||||
Format: "headers.format",
|
||||
Record: "headers.record",
|
||||
Block: "headers.block",
|
||||
}
|
||||
|
||||
// Generated where
|
||||
|
||||
type whereHelperint64 struct{ field string }
|
||||
|
||||
func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
|
||||
func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
|
||||
func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
|
||||
func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
|
||||
func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
|
||||
func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
|
||||
func (w whereHelperint64) IN(slice []int64) qm.QueryMod {
|
||||
values := make([]interface{}, 0, len(slice))
|
||||
for _, value := range slice {
|
||||
values = append(values, value)
|
||||
}
|
||||
return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
|
||||
}
|
||||
func (w whereHelperint64) NIN(slice []int64) qm.QueryMod {
|
||||
values := make([]interface{}, 0, len(slice))
|
||||
for _, value := range slice {
|
||||
values = append(values, value)
|
||||
}
|
||||
return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
|
||||
}
|
||||
|
||||
type whereHelpertime_Time struct{ field string }
|
||||
|
||||
func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.EQ, x)
|
||||
}
|
||||
func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.NEQ, x)
|
||||
}
|
||||
func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.LT, x)
|
||||
}
|
||||
func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.LTE, x)
|
||||
}
|
||||
func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.GT, x)
|
||||
}
|
||||
func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod {
|
||||
return qmhelper.Where(w.field, qmhelper.GTE, x)
|
||||
}
|
||||
|
||||
var HeaderWhere = struct {
|
||||
Typeflag whereHelperint64
|
||||
Name whereHelperstring
|
||||
Linkname whereHelperstring
|
||||
Size whereHelperint64
|
||||
Mode whereHelperint64
|
||||
UID whereHelperint64
|
||||
Gid whereHelperint64
|
||||
Uname whereHelperstring
|
||||
Gname whereHelperstring
|
||||
Modtime whereHelpertime_Time
|
||||
Accesstime whereHelpertime_Time
|
||||
Changetime whereHelpertime_Time
|
||||
Devmajor whereHelperint64
|
||||
Devminor whereHelperint64
|
||||
Format whereHelperint64
|
||||
Record whereHelperint64
|
||||
Block whereHelperint64
|
||||
}{
|
||||
Typeflag: whereHelperint64{field: "\"headers\".\"typeflag\""},
|
||||
Name: whereHelperstring{field: "\"headers\".\"name\""},
|
||||
Linkname: whereHelperstring{field: "\"headers\".\"linkname\""},
|
||||
Size: whereHelperint64{field: "\"headers\".\"size\""},
|
||||
Mode: whereHelperint64{field: "\"headers\".\"mode\""},
|
||||
UID: whereHelperint64{field: "\"headers\".\"uid\""},
|
||||
Gid: whereHelperint64{field: "\"headers\".\"gid\""},
|
||||
Uname: whereHelperstring{field: "\"headers\".\"uname\""},
|
||||
Gname: whereHelperstring{field: "\"headers\".\"gname\""},
|
||||
Modtime: whereHelpertime_Time{field: "\"headers\".\"modtime\""},
|
||||
Accesstime: whereHelpertime_Time{field: "\"headers\".\"accesstime\""},
|
||||
Changetime: whereHelpertime_Time{field: "\"headers\".\"changetime\""},
|
||||
Devmajor: whereHelperint64{field: "\"headers\".\"devmajor\""},
|
||||
Devminor: whereHelperint64{field: "\"headers\".\"devminor\""},
|
||||
Format: whereHelperint64{field: "\"headers\".\"format\""},
|
||||
Record: whereHelperint64{field: "\"headers\".\"record\""},
|
||||
Block: whereHelperint64{field: "\"headers\".\"block\""},
|
||||
}
|
||||
|
||||
// HeaderRels is where relationship names are stored.
|
||||
var HeaderRels = struct {
|
||||
}{}
|
||||
|
||||
// headerR is where relationships are stored.
|
||||
type headerR struct {
|
||||
}
|
||||
|
||||
// NewStruct creates a new relationship struct
|
||||
func (*headerR) NewStruct() *headerR {
|
||||
return &headerR{}
|
||||
}
|
||||
|
||||
// headerL is where Load methods for each relationship are stored.
|
||||
type headerL struct{}
|
||||
|
||||
var (
|
||||
headerAllColumns = []string{"typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "format", "record", "block"}
|
||||
headerColumnsWithoutDefault = []string{"typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "format", "record", "block"}
|
||||
headerColumnsWithDefault = []string{}
|
||||
headerPrimaryKeyColumns = []string{"name"}
|
||||
)
|
||||
|
||||
type (
|
||||
// HeaderSlice is an alias for a slice of pointers to Header.
|
||||
// This should almost always be used instead of []Header.
|
||||
HeaderSlice []*Header
|
||||
// HeaderHook is the signature for custom Header hook methods
|
||||
HeaderHook func(context.Context, boil.ContextExecutor, *Header) error
|
||||
|
||||
headerQuery struct {
|
||||
*queries.Query
|
||||
}
|
||||
)
|
||||
|
||||
// Cache for insert, update and upsert
|
||||
var (
|
||||
headerType = reflect.TypeOf(&Header{})
|
||||
headerMapping = queries.MakeStructMapping(headerType)
|
||||
headerPrimaryKeyMapping, _ = queries.BindMapping(headerType, headerMapping, headerPrimaryKeyColumns)
|
||||
headerInsertCacheMut sync.RWMutex
|
||||
headerInsertCache = make(map[string]insertCache)
|
||||
headerUpdateCacheMut sync.RWMutex
|
||||
headerUpdateCache = make(map[string]updateCache)
|
||||
headerUpsertCacheMut sync.RWMutex
|
||||
headerUpsertCache = make(map[string]insertCache)
|
||||
)
|
||||
|
||||
var (
|
||||
// Force time package dependency for automated UpdatedAt/CreatedAt.
|
||||
_ = time.Second
|
||||
// Force qmhelper dependency for where clause generation (which doesn't
|
||||
// always happen)
|
||||
_ = qmhelper.Where
|
||||
)
|
||||
|
||||
var headerBeforeInsertHooks []HeaderHook
|
||||
var headerBeforeUpdateHooks []HeaderHook
|
||||
var headerBeforeDeleteHooks []HeaderHook
|
||||
var headerBeforeUpsertHooks []HeaderHook
|
||||
|
||||
var headerAfterInsertHooks []HeaderHook
|
||||
var headerAfterSelectHooks []HeaderHook
|
||||
var headerAfterUpdateHooks []HeaderHook
|
||||
var headerAfterDeleteHooks []HeaderHook
|
||||
var headerAfterUpsertHooks []HeaderHook
|
||||
|
||||
// doBeforeInsertHooks executes all "before insert" hooks.
|
||||
func (o *Header) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerBeforeInsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doBeforeUpdateHooks executes all "before Update" hooks.
|
||||
func (o *Header) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerBeforeUpdateHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doBeforeDeleteHooks executes all "before Delete" hooks.
|
||||
func (o *Header) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerBeforeDeleteHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doBeforeUpsertHooks executes all "before Upsert" hooks.
|
||||
func (o *Header) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerBeforeUpsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterInsertHooks executes all "after Insert" hooks.
|
||||
func (o *Header) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerAfterInsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterSelectHooks executes all "after Select" hooks.
|
||||
func (o *Header) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerAfterSelectHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterUpdateHooks executes all "after Update" hooks.
|
||||
func (o *Header) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerAfterUpdateHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterDeleteHooks executes all "after Delete" hooks.
|
||||
func (o *Header) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerAfterDeleteHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAfterUpsertHooks executes all "after Upsert" hooks.
|
||||
func (o *Header) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||
if boil.HooksAreSkipped(ctx) {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, hook := range headerAfterUpsertHooks {
|
||||
if err := hook(ctx, exec, o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddHeaderHook registers your hook function for all future operations.
|
||||
func AddHeaderHook(hookPoint boil.HookPoint, headerHook HeaderHook) {
|
||||
switch hookPoint {
|
||||
case boil.BeforeInsertHook:
|
||||
headerBeforeInsertHooks = append(headerBeforeInsertHooks, headerHook)
|
||||
case boil.BeforeUpdateHook:
|
||||
headerBeforeUpdateHooks = append(headerBeforeUpdateHooks, headerHook)
|
||||
case boil.BeforeDeleteHook:
|
||||
headerBeforeDeleteHooks = append(headerBeforeDeleteHooks, headerHook)
|
||||
case boil.BeforeUpsertHook:
|
||||
headerBeforeUpsertHooks = append(headerBeforeUpsertHooks, headerHook)
|
||||
case boil.AfterInsertHook:
|
||||
headerAfterInsertHooks = append(headerAfterInsertHooks, headerHook)
|
||||
case boil.AfterSelectHook:
|
||||
headerAfterSelectHooks = append(headerAfterSelectHooks, headerHook)
|
||||
case boil.AfterUpdateHook:
|
||||
headerAfterUpdateHooks = append(headerAfterUpdateHooks, headerHook)
|
||||
case boil.AfterDeleteHook:
|
||||
headerAfterDeleteHooks = append(headerAfterDeleteHooks, headerHook)
|
||||
case boil.AfterUpsertHook:
|
||||
headerAfterUpsertHooks = append(headerAfterUpsertHooks, headerHook)
|
||||
}
|
||||
}
|
||||
|
||||
// One returns a single header record from the query.
|
||||
func (q headerQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Header, error) {
|
||||
o := &Header{}
|
||||
|
||||
queries.SetLimit(q.Query, 1)
|
||||
|
||||
err := q.Bind(ctx, exec, o)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == sql.ErrNoRows {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
return nil, errors.Wrap(err, "models: failed to execute a one query for headers")
|
||||
}
|
||||
|
||||
if err := o.doAfterSelectHooks(ctx, exec); err != nil {
|
||||
return o, err
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// All returns all Header records from the query.
|
||||
func (q headerQuery) All(ctx context.Context, exec boil.ContextExecutor) (HeaderSlice, error) {
|
||||
var o []*Header
|
||||
|
||||
err := q.Bind(ctx, exec, &o)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "models: failed to assign all query results to Header slice")
|
||||
}
|
||||
|
||||
if len(headerAfterSelectHooks) != 0 {
|
||||
for _, obj := range o {
|
||||
if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
|
||||
return o, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Count returns the count of all Header records in the query.
|
||||
func (q headerQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
var count int64
|
||||
|
||||
queries.SetSelect(q.Query, nil)
|
||||
queries.SetCount(q.Query)
|
||||
|
||||
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to count headers rows")
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// Exists checks if the row exists in the table.
|
||||
func (q headerQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
|
||||
var count int64
|
||||
|
||||
queries.SetSelect(q.Query, nil)
|
||||
queries.SetCount(q.Query)
|
||||
queries.SetLimit(q.Query, 1)
|
||||
|
||||
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "models: failed to check if headers exists")
|
||||
}
|
||||
|
||||
return count > 0, nil
|
||||
}
|
||||
|
||||
// Headers retrieves all the records using an executor.
|
||||
func Headers(mods ...qm.QueryMod) headerQuery {
|
||||
mods = append(mods, qm.From("\"headers\""))
|
||||
return headerQuery{NewQuery(mods...)}
|
||||
}
|
||||
|
||||
// FindHeader retrieves a single record by ID with an executor.
|
||||
// If selectCols is empty Find will return all columns.
|
||||
func FindHeader(ctx context.Context, exec boil.ContextExecutor, name string, selectCols ...string) (*Header, error) {
|
||||
headerObj := &Header{}
|
||||
|
||||
sel := "*"
|
||||
if len(selectCols) > 0 {
|
||||
sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
|
||||
}
|
||||
query := fmt.Sprintf(
|
||||
"select %s from \"headers\" where \"name\"=?", sel,
|
||||
)
|
||||
|
||||
q := queries.Raw(query, name)
|
||||
|
||||
err := q.Bind(ctx, exec, headerObj)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == sql.ErrNoRows {
|
||||
return nil, sql.ErrNoRows
|
||||
}
|
||||
return nil, errors.Wrap(err, "models: unable to select from headers")
|
||||
}
|
||||
|
||||
if err = headerObj.doAfterSelectHooks(ctx, exec); err != nil {
|
||||
return headerObj, err
|
||||
}
|
||||
|
||||
return headerObj, nil
|
||||
}
|
||||
|
||||
// Insert a single record using an executor.
|
||||
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
|
||||
func (o *Header) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
|
||||
if o == nil {
|
||||
return errors.New("models: no headers provided for insertion")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nzDefaults := queries.NonZeroDefaultSet(headerColumnsWithDefault, o)
|
||||
|
||||
key := makeCacheKey(columns, nzDefaults)
|
||||
headerInsertCacheMut.RLock()
|
||||
cache, cached := headerInsertCache[key]
|
||||
headerInsertCacheMut.RUnlock()
|
||||
|
||||
if !cached {
|
||||
wl, returnColumns := columns.InsertColumnSet(
|
||||
headerAllColumns,
|
||||
headerColumnsWithDefault,
|
||||
headerColumnsWithoutDefault,
|
||||
nzDefaults,
|
||||
)
|
||||
|
||||
cache.valueMapping, err = queries.BindMapping(headerType, headerMapping, wl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cache.retMapping, err = queries.BindMapping(headerType, headerMapping, returnColumns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(wl) != 0 {
|
||||
cache.query = fmt.Sprintf("INSERT INTO \"headers\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
|
||||
} else {
|
||||
cache.query = "INSERT INTO \"headers\" %sDEFAULT VALUES%s"
|
||||
}
|
||||
|
||||
var queryOutput, queryReturning string
|
||||
|
||||
if len(cache.retMapping) != 0 {
|
||||
cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"headers\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, headerPrimaryKeyColumns))
|
||||
}
|
||||
|
||||
cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
|
||||
}
|
||||
|
||||
value := reflect.Indirect(reflect.ValueOf(o))
|
||||
vals := queries.ValuesFromMapping(value, cache.valueMapping)
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, cache.query)
|
||||
fmt.Fprintln(writer, vals)
|
||||
}
|
||||
_, err = exec.ExecContext(ctx, cache.query, vals...)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "models: unable to insert into headers")
|
||||
}
|
||||
|
||||
var identifierCols []interface{}
|
||||
|
||||
if len(cache.retMapping) == 0 {
|
||||
goto CacheNoHooks
|
||||
}
|
||||
|
||||
identifierCols = []interface{}{
|
||||
o.Name,
|
||||
}
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, cache.retQuery)
|
||||
fmt.Fprintln(writer, identifierCols...)
|
||||
}
|
||||
err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "models: unable to populate default values for headers")
|
||||
}
|
||||
|
||||
CacheNoHooks:
|
||||
if !cached {
|
||||
headerInsertCacheMut.Lock()
|
||||
headerInsertCache[key] = cache
|
||||
headerInsertCacheMut.Unlock()
|
||||
}
|
||||
|
||||
return o.doAfterInsertHooks(ctx, exec)
|
||||
}
|
||||
|
||||
// Update uses an executor to update the Header.
|
||||
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
|
||||
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
|
||||
func (o *Header) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
|
||||
var err error
|
||||
if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
key := makeCacheKey(columns, nil)
|
||||
headerUpdateCacheMut.RLock()
|
||||
cache, cached := headerUpdateCache[key]
|
||||
headerUpdateCacheMut.RUnlock()
|
||||
|
||||
if !cached {
|
||||
wl := columns.UpdateColumnSet(
|
||||
headerAllColumns,
|
||||
headerPrimaryKeyColumns,
|
||||
)
|
||||
|
||||
if !columns.IsWhitelist() {
|
||||
wl = strmangle.SetComplement(wl, []string{"created_at"})
|
||||
}
|
||||
if len(wl) == 0 {
|
||||
return 0, errors.New("models: unable to update headers, could not build whitelist")
|
||||
}
|
||||
|
||||
cache.query = fmt.Sprintf("UPDATE \"headers\" SET %s WHERE %s",
|
||||
strmangle.SetParamNames("\"", "\"", 0, wl),
|
||||
strmangle.WhereClause("\"", "\"", 0, headerPrimaryKeyColumns),
|
||||
)
|
||||
cache.valueMapping, err = queries.BindMapping(headerType, headerMapping, append(wl, headerPrimaryKeyColumns...))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, cache.query)
|
||||
fmt.Fprintln(writer, values)
|
||||
}
|
||||
var result sql.Result
|
||||
result, err = exec.ExecContext(ctx, cache.query, values...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to update headers row")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by update for headers")
|
||||
}
|
||||
|
||||
if !cached {
|
||||
headerUpdateCacheMut.Lock()
|
||||
headerUpdateCache[key] = cache
|
||||
headerUpdateCacheMut.Unlock()
|
||||
}
|
||||
|
||||
return rowsAff, o.doAfterUpdateHooks(ctx, exec)
|
||||
}
|
||||
|
||||
// UpdateAll updates all rows with the specified column values.
|
||||
func (q headerQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
|
||||
queries.SetUpdate(q.Query, cols)
|
||||
|
||||
result, err := q.Query.ExecContext(ctx, exec)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to update all for headers")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to retrieve rows affected for headers")
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// UpdateAll updates all rows with the specified column values, using an executor.
|
||||
func (o HeaderSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
|
||||
ln := int64(len(o))
|
||||
if ln == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if len(cols) == 0 {
|
||||
return 0, errors.New("models: update all requires at least one column argument")
|
||||
}
|
||||
|
||||
colNames := make([]string, len(cols))
|
||||
args := make([]interface{}, len(cols))
|
||||
|
||||
i := 0
|
||||
for name, value := range cols {
|
||||
colNames[i] = name
|
||||
args[i] = value
|
||||
i++
|
||||
}
|
||||
|
||||
// Append all of the primary key values for each column
|
||||
for _, obj := range o {
|
||||
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), headerPrimaryKeyMapping)
|
||||
args = append(args, pkeyArgs...)
|
||||
}
|
||||
|
||||
sql := fmt.Sprintf("UPDATE \"headers\" SET %s WHERE %s",
|
||||
strmangle.SetParamNames("\"", "\"", 0, colNames),
|
||||
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(o)))
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, args...)
|
||||
}
|
||||
result, err := exec.ExecContext(ctx, sql, args...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to update all in header slice")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all header")
|
||||
}
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// Delete deletes a single Header record with an executor.
|
||||
// Delete will match against the primary key column to find the record to delete.
|
||||
func (o *Header) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
if o == nil {
|
||||
return 0, errors.New("models: no Header provided for delete")
|
||||
}
|
||||
|
||||
if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), headerPrimaryKeyMapping)
|
||||
sql := "DELETE FROM \"headers\" WHERE \"name\"=?"
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, args...)
|
||||
}
|
||||
result, err := exec.ExecContext(ctx, sql, args...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to delete from headers")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by delete for headers")
|
||||
}
|
||||
|
||||
if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// DeleteAll deletes all matching rows.
|
||||
func (q headerQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
if q.Query == nil {
|
||||
return 0, errors.New("models: no headerQuery provided for delete all")
|
||||
}
|
||||
|
||||
queries.SetDelete(q.Query)
|
||||
|
||||
result, err := q.Query.ExecContext(ctx, exec)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to delete all from headers")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for headers")
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// DeleteAll deletes all rows in the slice, using an executor.
|
||||
func (o HeaderSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||
if len(o) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if len(headerBeforeDeleteHooks) != 0 {
|
||||
for _, obj := range o {
|
||||
if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var args []interface{}
|
||||
for _, obj := range o {
|
||||
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), headerPrimaryKeyMapping)
|
||||
args = append(args, pkeyArgs...)
|
||||
}
|
||||
|
||||
sql := "DELETE FROM \"headers\" WHERE " +
|
||||
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(o))
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, args)
|
||||
}
|
||||
result, err := exec.ExecContext(ctx, sql, args...)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: unable to delete all from header slice")
|
||||
}
|
||||
|
||||
rowsAff, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for headers")
|
||||
}
|
||||
|
||||
if len(headerAfterDeleteHooks) != 0 {
|
||||
for _, obj := range o {
|
||||
if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rowsAff, nil
|
||||
}
|
||||
|
||||
// Reload refetches the object from the database
|
||||
// using the primary keys with an executor.
|
||||
func (o *Header) Reload(ctx context.Context, exec boil.ContextExecutor) error {
|
||||
ret, err := FindHeader(ctx, exec, o.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*o = *ret
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReloadAll refetches every row with matching primary key column values
|
||||
// and overwrites the original object slice with the newly updated slice.
|
||||
func (o *HeaderSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
|
||||
if o == nil || len(*o) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
slice := HeaderSlice{}
|
||||
var args []interface{}
|
||||
for _, obj := range *o {
|
||||
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), headerPrimaryKeyMapping)
|
||||
args = append(args, pkeyArgs...)
|
||||
}
|
||||
|
||||
sql := "SELECT \"headers\".* FROM \"headers\" WHERE " +
|
||||
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(*o))
|
||||
|
||||
q := queries.Raw(sql, args...)
|
||||
|
||||
err := q.Bind(ctx, exec, &slice)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "models: unable to reload all in HeaderSlice")
|
||||
}
|
||||
|
||||
*o = slice
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeaderExists checks if the Header row exists.
|
||||
func HeaderExists(ctx context.Context, exec boil.ContextExecutor, name string) (bool, error) {
|
||||
var exists bool
|
||||
sql := "select exists(select 1 from \"headers\" where \"name\"=? limit 1)"
|
||||
|
||||
if boil.IsDebug(ctx) {
|
||||
writer := boil.DebugWriterFrom(ctx)
|
||||
fmt.Fprintln(writer, sql)
|
||||
fmt.Fprintln(writer, name)
|
||||
}
|
||||
row := exec.QueryRowContext(ctx, sql, name)
|
||||
|
||||
err := row.Scan(&exists)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "models: unable to check if headers exists")
|
||||
}
|
||||
|
||||
return exists, nil
|
||||
}
|
||||
@@ -1,684 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/volatiletech/randomize"
|
||||
"github.com/volatiletech/sqlboiler/v4/boil"
|
||||
"github.com/volatiletech/sqlboiler/v4/queries"
|
||||
"github.com/volatiletech/strmangle"
|
||||
)
|
||||
|
||||
var (
|
||||
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
|
||||
// so force a package dependency in case they don't.
|
||||
_ = queries.Equal
|
||||
)
|
||||
|
||||
func testHeaders(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
query := Headers()
|
||||
|
||||
if query.Query == nil {
|
||||
t.Error("expected a query, got nothing")
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if rowsAff, err := o.Delete(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only have deleted one row, but affected:", rowsAff)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Error("want zero records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersQueryDeleteAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if rowsAff, err := Headers().DeleteAll(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only have deleted one row, but affected:", rowsAff)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Error("want zero records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersSliceDeleteAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice := HeaderSlice{o}
|
||||
|
||||
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only have deleted one row, but affected:", rowsAff)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 0 {
|
||||
t.Error("want zero records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersExists(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
e, err := HeaderExists(ctx, tx, o.Name)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to check if Header exists: %s", err)
|
||||
}
|
||||
if !e {
|
||||
t.Errorf("Expected HeaderExists to return true, but got false.")
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersFind(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
headerFound, err := FindHeader(ctx, tx, o.Name)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if headerFound == nil {
|
||||
t.Error("want a record, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersBind(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err = Headers().Bind(ctx, tx, o); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersOne(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if x, err := Headers().One(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
} else if x == nil {
|
||||
t.Error("expected to get a non nil record")
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
headerOne := &Header{}
|
||||
headerTwo := &Header{}
|
||||
if err = randomize.Struct(seed, headerOne, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
if err = randomize.Struct(seed, headerTwo, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = headerOne.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err = headerTwo.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice, err := Headers().All(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(slice) != 2 {
|
||||
t.Error("want 2 records, got:", len(slice))
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersCount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
seed := randomize.NewSeed()
|
||||
headerOne := &Header{}
|
||||
headerTwo := &Header{}
|
||||
if err = randomize.Struct(seed, headerOne, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
if err = randomize.Struct(seed, headerTwo, headerDBTypes, false, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = headerOne.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err = headerTwo.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 2 {
|
||||
t.Error("want 2 records, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func headerBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func headerAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error {
|
||||
*o = Header{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testHeadersHooks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var err error
|
||||
|
||||
ctx := context.Background()
|
||||
empty := &Header{}
|
||||
o := &Header{}
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, false); err != nil {
|
||||
t.Errorf("Unable to randomize Header object: %s", err)
|
||||
}
|
||||
|
||||
AddHeaderHook(boil.BeforeInsertHook, headerBeforeInsertHook)
|
||||
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerBeforeInsertHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.AfterInsertHook, headerAfterInsertHook)
|
||||
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerAfterInsertHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.AfterSelectHook, headerAfterSelectHook)
|
||||
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerAfterSelectHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.BeforeUpdateHook, headerBeforeUpdateHook)
|
||||
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerBeforeUpdateHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.AfterUpdateHook, headerAfterUpdateHook)
|
||||
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerAfterUpdateHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.BeforeDeleteHook, headerBeforeDeleteHook)
|
||||
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerBeforeDeleteHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.AfterDeleteHook, headerAfterDeleteHook)
|
||||
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerAfterDeleteHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.BeforeUpsertHook, headerBeforeUpsertHook)
|
||||
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerBeforeUpsertHooks = []HeaderHook{}
|
||||
|
||||
AddHeaderHook(boil.AfterUpsertHook, headerAfterUpsertHook)
|
||||
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
|
||||
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(o, empty) {
|
||||
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
|
||||
}
|
||||
headerAfterUpsertHooks = []HeaderHook{}
|
||||
}
|
||||
|
||||
func testHeadersInsert(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersInsertWhitelist(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Whitelist(headerColumnsWithoutDefault...)); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersReload(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err = o.Reload(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersReloadAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice := HeaderSlice{o}
|
||||
|
||||
if err = slice.ReloadAll(ctx, tx); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersSelect(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
slice, err := Headers().All(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(slice) != 1 {
|
||||
t.Error("want one record, got:", len(slice))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
headerDBTypes = map[string]string{`Typeflag`: `INTEGER`, `Name`: `TEXT`, `Linkname`: `TEXT`, `Size`: `INTEGER`, `Mode`: `INTEGER`, `UID`: `INTEGER`, `Gid`: `INTEGER`, `Uname`: `TEXT`, `Gname`: `TEXT`, `Modtime`: `DATE`, `Accesstime`: `DATE`, `Changetime`: `DATE`, `Devmajor`: `INTEGER`, `Devminor`: `INTEGER`, `Format`: `INTEGER`, `Record`: `INTEGER`, `Block`: `INTEGER`}
|
||||
_ = bytes.MinRead
|
||||
)
|
||||
|
||||
func testHeadersUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if 0 == len(headerPrimaryKeyColumns) {
|
||||
t.Skip("Skipping table with no primary key columns")
|
||||
}
|
||||
if len(headerAllColumns) == len(headerPrimaryKeyColumns) {
|
||||
t.Skip("Skipping table with only primary key columns")
|
||||
}
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("should only affect one row but affected", rowsAff)
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadersSliceUpdateAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if len(headerAllColumns) == len(headerPrimaryKeyColumns) {
|
||||
t.Skip("Skipping table with only primary key columns")
|
||||
}
|
||||
|
||||
seed := randomize.NewSeed()
|
||||
var err error
|
||||
o := &Header{}
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
count, err := Headers().Count(ctx, tx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Error("want one record, got:", count)
|
||||
}
|
||||
|
||||
if err = randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil {
|
||||
t.Errorf("Unable to randomize Header struct: %s", err)
|
||||
}
|
||||
|
||||
// Remove Primary keys and unique columns from what we plan to update
|
||||
var fields []string
|
||||
if strmangle.StringSliceMatch(headerAllColumns, headerPrimaryKeyColumns) {
|
||||
fields = headerAllColumns
|
||||
} else {
|
||||
fields = strmangle.SetComplement(
|
||||
headerAllColumns,
|
||||
headerPrimaryKeyColumns,
|
||||
)
|
||||
}
|
||||
|
||||
value := reflect.Indirect(reflect.ValueOf(o))
|
||||
typ := reflect.TypeOf(o).Elem()
|
||||
n := typ.NumField()
|
||||
|
||||
updateMap := M{}
|
||||
for _, col := range fields {
|
||||
for i := 0; i < n; i++ {
|
||||
f := typ.Field(i)
|
||||
if f.Tag.Get("boil") == col {
|
||||
updateMap[col] = value.Field(i).Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
slice := HeaderSlice{o}
|
||||
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
|
||||
t.Error(err)
|
||||
} else if rowsAff != 1 {
|
||||
t.Error("wanted one record updated but got", rowsAff)
|
||||
}
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
// Code generated by SQLBoiler 4.7.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var rgxSQLitekey = regexp.MustCompile(`(?mi)((,\n)?\s+foreign key.*?\n)+`)
|
||||
|
||||
type sqliteTester struct {
|
||||
dbConn *sql.DB
|
||||
|
||||
dbName string
|
||||
testDBName string
|
||||
}
|
||||
|
||||
func init() {
|
||||
dbMain = &sqliteTester{}
|
||||
}
|
||||
|
||||
func (s *sqliteTester) setup() error {
|
||||
var err error
|
||||
|
||||
s.dbName = viper.GetString("sqlite3.dbname")
|
||||
if len(s.dbName) == 0 {
|
||||
return errors.New("no dbname specified")
|
||||
}
|
||||
|
||||
s.testDBName = filepath.Join(os.TempDir(), fmt.Sprintf("boil-sqlite3-%d.sql", rand.Int()))
|
||||
|
||||
dumpCmd := exec.Command("sqlite3", "-cmd", ".dump", s.dbName)
|
||||
createCmd := exec.Command("sqlite3", s.testDBName)
|
||||
|
||||
r, w := io.Pipe()
|
||||
dumpCmd.Stdout = w
|
||||
createCmd.Stdin = newFKeyDestroyer(rgxSQLitekey, r)
|
||||
|
||||
if err = dumpCmd.Start(); err != nil {
|
||||
return errors.Wrap(err, "failed to start sqlite3 dump command")
|
||||
}
|
||||
if err = createCmd.Start(); err != nil {
|
||||
return errors.Wrap(err, "failed to start sqlite3 create command")
|
||||
}
|
||||
|
||||
if err = dumpCmd.Wait(); err != nil {
|
||||
fmt.Println(err)
|
||||
return errors.Wrap(err, "failed to wait for sqlite3 dump command")
|
||||
}
|
||||
|
||||
w.Close() // After dumpCmd is done, close the write end of the pipe
|
||||
|
||||
if err = createCmd.Wait(); err != nil {
|
||||
fmt.Println(err)
|
||||
return errors.Wrap(err, "failed to wait for sqlite3 create command")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *sqliteTester) teardown() error {
|
||||
if s.dbConn != nil {
|
||||
s.dbConn.Close()
|
||||
}
|
||||
|
||||
return os.Remove(s.testDBName)
|
||||
}
|
||||
|
||||
func (s *sqliteTester) conn() (*sql.DB, error) {
|
||||
if s.dbConn != nil {
|
||||
return s.dbConn, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
s.dbConn, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?_loc=UTC", s.testDBName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s.dbConn, nil
|
||||
}
|
||||
17
pkg/readers/counter.go
Normal file
17
pkg/readers/counter.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package readers
|
||||
|
||||
import "io"
|
||||
|
||||
type Counter struct {
|
||||
Reader io.Reader
|
||||
|
||||
BytesRead int
|
||||
}
|
||||
|
||||
func (r *Counter) Read(p []byte) (n int, err error) {
|
||||
n, err = r.Reader.Read(p)
|
||||
|
||||
r.BytesRead += n
|
||||
|
||||
return n, err
|
||||
}
|
||||
Reference in New Issue
Block a user