diff --git a/Makefile b/Makefile index 7a34ff7..a8e0379 100644 --- a/Makefile +++ b/Makefile @@ -39,5 +39,6 @@ clean: depend: go install github.com/rubenv/sql-migrate/sql-migrate@latest go install github.com/volatiletech/sqlboiler/v4@latest + go install github.com/volatiletech/sqlboiler-sqlite3@latest sql-migrate up -env="production" -config configs/sql-migrate/metadata.yaml go generate ./... \ No newline at end of file diff --git a/go.mod b/go.mod index ec252c6..51287b6 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( aead.dev/minisign v0.2.0 filippo.io/age v1.0.0 github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3 - github.com/ProtonMail/gopenpgp/v2 v2.3.0 + github.com/ProtonMail/gopenpgp/v2 v2.3.1 github.com/andybalholm/brotli v1.0.4 github.com/cosnicolaou/pbzip2 v1.0.1 github.com/dsnet/compress v0.0.1 @@ -16,12 +16,12 @@ require ( github.com/klauspost/compress v1.13.6 github.com/klauspost/pgzip v1.2.5 github.com/mattn/go-sqlite3 v1.14.9 - github.com/pierrec/lz4/v4 v4.1.11 + github.com/pierrec/lz4/v4 v4.1.12 github.com/pkg/errors v0.9.1 github.com/rubenv/sql-migrate v0.0.0-20211023115951-9f02b1e13857 github.com/spf13/afero v1.6.0 - github.com/spf13/cobra v1.2.1 - github.com/spf13/viper v1.9.0 + github.com/spf13/cobra v1.3.0 + github.com/spf13/viper v1.10.1 github.com/volatiletech/null/v8 v8.1.2 github.com/volatiletech/randomize v0.0.1 github.com/volatiletech/sqlboiler/v4 v4.8.3 @@ -35,7 +35,7 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/magiconair/properties v1.8.5 // indirect - github.com/mitchellh/mapstructure v1.4.2 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -45,10 +45,10 @@ require ( github.com/volatiletech/inflect v0.0.1 // indirect github.com/ziutek/mymysql v1.5.4 // indirect golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect - golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1 // indirect - golang.org/x/text v0.3.6 // indirect + golang.org/x/sys v0.0.0-20211210111614-af8b64212486 // indirect + golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gopkg.in/gorp.v1 v1.7.2 // indirect - gopkg.in/ini.v1 v1.63.2 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 9a59077..a5d629a 100644 --- a/go.sum +++ b/go.sum @@ -25,6 +25,10 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -35,6 +39,7 @@ 
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -52,6 +57,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -61,8 +67,8 @@ github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3 h1:XcF0cTDJei github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/ProtonMail/go-mime v0.0.0-20190923161245-9b5a4261663a h1:W6RrgN/sTxg1msqzFFb+G80MFmpjMw61IU+slm+wln4= github.com/ProtonMail/go-mime v0.0.0-20190923161245-9b5a4261663a/go.mod h1:NYt+V3/4rEeDuaev/zw1zCq8uqVEuPHzDPo3OZrlGJ4= -github.com/ProtonMail/gopenpgp/v2 v2.3.0 h1:eniutitHk02Yn3GtaDfJTVm/Ca1e8s6zkS0SpeaocXI= -github.com/ProtonMail/gopenpgp/v2 v2.3.0/go.mod h1:F62x0m3akQuisX36pOgAtKOHZ1E7/MpnX8bZWCK+5dA= +github.com/ProtonMail/gopenpgp/v2 v2.3.1 h1:bvmcH8GBJx9uwYGJxf8NSl2lTh4hIu9LJ2iWJzF3XHg= +github.com/ProtonMail/gopenpgp/v2 v2.3.1/go.mod h1:F62x0m3akQuisX36pOgAtKOHZ1E7/MpnX8bZWCK+5dA= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -79,6 +85,7 @@ github.com/apmckinlay/gsuneido v0.0.0-20180907175622-1f10244968e3/go.mod h1:hJna github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -93,17 +100,27 @@ github.com/bketelsen/crypt v0.0.4/go.mod 
h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqO github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -116,6 +133,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cosnicolaou/pbzip2 v1.0.1 h1:f5Ix000Rtl9tr0Ne33wNLtljGl2nAyR4ZirJrz9qg+0= github.com/cosnicolaou/pbzip2 v1.0.1/go.mod h1:cE04zhBMvwMrCLhsx6aLYh9cGsU9GyFB0oo/GmO+SkY= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -138,10 +156,14 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/ericlagergren/decimal v0.0.0-20181231230500-73749d4874d5/go.mod h1:1yj25TwtUlJ+pfOu9apAVaM1RWfZGg+aFpd4hPQZekQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fclairamb/ftpserverlib v0.16.0 h1:uATwg14csmYcYaf3n7G16FCzuqYYI28PjBL4Jsk+yXo= github.com/fclairamb/ftpserverlib v0.16.0/go.mod h1:+Doq95UijHTIaJcWREhyu9dyQOqyoULbVU3OXgs8wEI= github.com/fclairamb/go-log v0.1.0 h1:fNoqk8w62i4EDEuRzDgHdDVTqMYSyr3DS981R7F2x/Y= @@ -190,6 +212,7 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -256,6 +279,7 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -267,16 +291,23 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.8.1/go.mod 
h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -286,18 +317,23 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf 
v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -309,8 +345,10 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -346,6 +384,7 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.1-0.20191011153232-f91d3411e481/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -358,11 +397,14 @@ github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kN github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= @@ -371,6 +413,7 @@ github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -384,13 +427,15 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= @@ -409,6 +454,7 @@ github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWEr github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= @@ -416,8 +462,8 @@ github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1 h1:VGcrWe3yk6o+t7BdVNy5UDPWa4OZuDWtE1W1ZbS7Kyw= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4/v4 v4.1.11 h1:LVs17FAZJFOjgmJXl9Tf13WfLUvZq7/RjfEJrnwZ9OE= -github.com/pierrec/lz4/v4 v4.1.11/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8= +github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -431,6 +477,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -440,11 +487,13 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -456,8 +505,11 @@ github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rubenv/sql-migrate v0.0.0-20211023115951-9f02b1e13857 
h1:nI2V0EI64bEYpbyOmwYfk0DYu26j0k4LhC7YS4tKkhA= github.com/rubenv/sql-migrate v0.0.0-20211023115951-9f02b1e13857/go.mod h1:HFLT6i9iR4QBOF5rdCyjddC9t59ArqWJV2xx+jwcCMo= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo= github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY= @@ -474,6 +526,7 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -481,8 +534,9 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -491,8 +545,10 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= 
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -508,6 +564,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/volatiletech/inflect v0.0.1 h1:2a6FcMQyhmPZcLa+uet3VJ8gLn/9svWhJxJYwvE8KsU= @@ -531,8 +588,11 @@ github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -604,6 +664,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -646,7 +707,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -663,6 +726,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -727,6 +792,7 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -741,10 +807,16 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1 h1:kwrAHlwJ0DUBZwQ238v+Uod/3eZ8B2K5rYsUHBQvzmI= -golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= @@ -756,8 +828,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -853,7 +926,13 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -913,6 +992,17 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -939,6 +1029,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -966,8 +1059,9 @@ gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= diff --git a/internal/db/sqlite/migrations/metadata/migrations.go b/internal/db/sqlite/migrations/metadata/migrations.go new file mode 100644 index 0000000..f3a427e --- /dev/null +++ b/internal/db/sqlite/migrations/metadata/migrations.go @@ -0,0 +1,115 @@ +package metadata + 
+import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "strings" +) + +func bindata_read(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + return buf.Bytes(), nil +} + +var _db_sqlite_migrations_metadata_1637447083_sql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x56\xcd\x72\xdb\x36\x10\x3e\xd7\x4f\xb1\x93\x4b\xec\xa9\xa8\x73\xa7\x99\x1e\xdc\xb8\x71\x3d\x13\x3b\x19\xd9\xaa\x73\x34\x48\x2c\x49\x54\x20\x16\x5d\x80\x92\x99\xa7\xef\x2c\x40\x2a\xb2\x14\xc9\xe9\xf4\x24\x0a\xd8\xfd\xf6\xff\x5b\x14\x05\xfc\xdc\x99\x86\x55\x44\x58\xfa\xb3\x8a\x51\xbe\xa2\x2a\x2d\x42\x8b\x4a\x23\x07\x38\x3f\x03\x00\x28\x0a\x58\x60\x45\xac\x81\x6a\x88\xad\x09\xe3\x3d\x90\x83\xd8\x8a\x8e\xc7\x24\xc8\x59\xca\xb8\x88\x0d\x32\x38\x8a\xe0\x7a\x6b\x67\xdf\x43\x41\xb0\x2a\x44\xe8\xbd\x16\xb3\x13\xe0\x69\x7c\xd1\x58\x39\xda\xb8\xd7\x0c\xfd\x6e\xa9\x5a\xed\xa3\x99\x8c\x96\x75\x93\x64\x99\xc4\x7e\x04\xe5\x47\xbc\x3d\xc4\xdf\xfa\xfb\x8a\xa1\x9b\x1a\x02\xc6\x59\x52\x1f\xc1\x5a\x15\xa0\x44\x74\xa0\xd1\x62\x44\x9d\x72\xa1\x3c\xce\xa0\xec\x23\x3c\xed\x65\xe2\x09\x94\xd3\x3b\xa7\xc9\xde\x13\x28\x46\x08\xd1\x58\x2b\xae\x32\x5a\x5c\x2b\x57\xe5\x54\x4e\xb0\x47\x7d\x7a\x18\x3c\xd6\x56\x35\x60\x42\x2e\xc2\xe0\x51\x60\x46\xff\xd0\x45\x1e\xe6\x5b\xe1\x16\xe1\x2b\x32\xc1\x5a\xd9\x1e\x45\x45\xf5\x91\x3a\x15\x4d\xa5\xac\x1d\xc0\x33\x75\x24\xe6\x22\x01\x9a\xd8\x22\x27\xfc\x05\x36\x40\xf9\xf3\xca\xf0\x04\xa6\xd1\xa3\xd3\xc6\x35\x53\xfd\x3d\x63\x40\x57\x25\xf3\x0a\x22\x2b\x63\xe5\x36\x58\x15\x5a\xc9\xfa\x9d\xea\x30\xbb\x12\xb7\x4e\x1f\x0b\x4b\x64\x05\xa7\x36\x16\x73\x10\xe9\xc6\xc9\x71\xc4\xe7\xb8\xd5\x00\xcf\xa6\x53\x3c\xc0\x0a\x87\x6f\x49\x51\xdc\x60\xcc\xd2\x54\x83\x35\x6e\x05\xe7\x6b\x65\x8d\x86\x7a\x0c\xe4\xa3\x9c\x8d\xdf\xf7\x43\x27\x22\x17\xb9\x19\x8c\x5b\x1d\x9a\xd9\x42\x7f\xa4\x46\x92\x95\x1d\x0b\xe6\x2b\x4a\x64\xe5\x10\x31\x24\x89\xf1\xe4\x48\x54\x9f\x91\x3b\x13\x82\x21\x97\x1a\xa1\x23\x8d\x50\x9a\x98\x55\xd3\xbf\xa3\xaa\xcb\x80\x0c\x37\x57\x12\x0e\x6d\x1c\xe6\x2a\xf4\xe6\x44\x67\x5c\x33\xf5\xfe\x40\xa5\x39\xa5\x92\x8c\x4c\x59\xdb\x31\x73\x22\x1f\xd9\xca\xa1\x4e\x73\x42\xe7\x26\x8f\xea\x07\xe2\x4e\x45\xe9\xc2\xde\x05\x8f\x95\xa9\x0d\xea\x34\x5d\x0e\x1e\xd9\x44\xe4\x79\xfa\xf9\x33\xb7\x32\x53\xef\x74\x80\x5b\xd2\x0f\xa6\xc3\x09\x2b\x52\xc2\x72\xa8\x18\x43\x84\x80\x15\x39\x9d\xb2\x6b\x1a\x47\x8c\x79\x2a\x2e\xab\x0a\x43\x10\xbd\x74\xf5\xbe\x55\xae\xc1\xf4\xb7\x36\x68\x75\x98\x26\x64\xdb\x40\x04\x7d\x78\xa1\x46\xbc\xa3\x35\x83\xec\xef\xb0\x1b\x87\x0a\xf0\xf9\xf2\x8b\x08\x5e\xdf\x2d\xe7\x7b\x48\xa1\x2f\x8b\xd1\x37\xc6\x40\xb6\x8f\x86\xdc\x09\x98\xad\xfe\x2d\x69\x53\x9b\x4a\x89\x3c\xc4\x29\xf0\x8e\xb4\x7c\x43\xa2\xb9\x83\xfc\x66\xb7\x93\x34\x9c\x33\xfe\xd3\x1b\xc9\xc3\x38\xd0\xdf\x7c\x84\xd0\x7b\x4f\x1c\x73\xdf\xab\xa4\x74\x02\x35\x87\xff\x1f\x51\xab\xa4\x74\x02\xf5\x56\xfd\x4d\x0c\x1a\xd7\xa6\x42\x70\x7d\x57\x22\xef\x8f\xea\xfb\x56\xf1\x34\xaa\x89\xeb\x2f\x46\x6a\x5c\x77\x49\xf9\x68\x3b\xdf\x1a\xf7\x3f\xb0\x93\xf2\xf1\x51\xbe\xfc\x92\xd7\x64\x48\x34\x0a\x9d\xf2\x32\x00\x92\x07\x7c\x8e\xe8\x34\xea\x89\x83\x33\xf7\x1f\xf4\x98\x0c\x5b\xa1\xb1\x36\x0e\xf5\x24\x03\xa1\xa5\xde\x6a\x68\xd5\x1a\x85\xd1\xc2\xb4\xd6\x6a\xb2\x96\x36\x42\xa7\x35\x71\xf7\xeb\x88\xf1\xd3\x5f\x7f\xdc\x5d\x7d\x5a\xcc\x57\x38\x6c\xa6\x65\x56\x14\xf0\xd8\x22\x23\xe4\x3b\xf1\x2e\x50\x87\x69\x44\x83\x57\x55\x
22\x2c\x65\x2d\xf4\xde\x23\x57\x2a\xe0\x2c\xcd\xc4\x88\x01\x9d\x1a\x26\x1c\x09\xba\x22\x17\xd5\xb8\x32\xdf\xfe\xf6\x56\x0a\xca\xaa\x8a\x92\x49\x9c\x37\xf3\x19\xbc\xb9\xfe\xf4\xf1\xf2\xee\x7a\xee\x57\xcd\x7c\x8d\x2c\xe4\xf6\xe6\xe2\xc5\xc6\x59\xe1\x90\x2c\xe4\xa5\x33\x46\x58\x4a\x2f\xb8\x02\x3b\x1f\x07\x58\x3e\x7c\x28\x7e\x81\x10\xd9\xb8\xe6\x20\x4f\x8f\x47\xf8\xc0\x04\x90\xad\x25\x94\x21\x59\x9f\x32\xa8\x91\xcd\x1a\x35\xd4\x4c\x9d\x78\x3d\xc1\x50\xea\xd3\x3c\xee\x92\x82\x11\x25\xaa\x55\xda\x5c\x15\xea\xbc\xbb\xd6\xb9\x9b\x17\xbb\x45\xf3\xea\x79\x82\xff\x3e\xa3\x8d\xb3\x3b\xb1\x58\x18\x6b\x96\x0e\xc7\x0a\x46\xc5\x63\x43\x1c\x90\x8d\x3c\x4e\xa4\x4c\x18\xa1\x1c\x60\x91\x85\xee\xc4\x90\x92\xd6\x2a\x31\xc4\x02\xeb\x9a\x38\x42\xd3\xcb\x64\xab\xb8\x63\x60\x9b\xeb\x7b\x23\x01\xc8\x45\x86\x00\x6b\x4a\xe4\xb4\xd8\x19\x95\x9e\xfa\x80\x5c\x51\x51\xe7\xad\x51\x2e\xa6\x35\x16\xb6\x61\x98\xc4\xc6\x9e\x42\x30\xf2\xbc\x94\x21\x49\x2f\xa7\x48\x52\xaf\x1c\xe4\xd2\xa5\xa7\xcb\x7e\x10\x37\xf5\x6e\xcc\x2f\x39\x1d\x36\xaf\x96\x70\x4b\xe7\x22\x69\xa2\x70\xe6\x98\x44\xc3\x21\x4e\xb0\xe7\x63\x23\x12\x8f\xef\xba\xe5\xfd\xc3\xe5\x22\xd5\x7f\x26\xdc\x73\x31\xc1\x54\xca\xa7\x07\x32\xd5\x80\xae\xa2\xf4\x4a\x49\x91\x8c\x86\xcf\x03\x4e\xf1\x8c\xbd\x3a\x39\xbe\x37\xef\x67\x17\xef\xce\x76\x5f\xe0\x57\xb4\x71\x67\x9a\xc9\xbf\x7c\x81\xbf\xfb\x37\x00\x00\xff\xff\x7a\x41\x6c\x96\xa6\x0b\x00\x00") + +func db_sqlite_migrations_metadata_1637447083_sql() ([]byte, error) { + return bindata_read( + _db_sqlite_migrations_metadata_1637447083_sql, + "../../db/sqlite/migrations/metadata/1637447083.sql", + ) +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + return f() + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() ([]byte, error){ + "../../db/sqlite/migrations/metadata/1637447083.sql": db_sqlite_migrations_metadata_1637447083_sql, +} +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for name := range node.Children { + rv = append(rv, name) + } + return rv, nil +} + +type _bintree_t struct { + Func func() ([]byte, error) + Children map[string]*_bintree_t +} +var _bintree = &_bintree_t{nil, map[string]*_bintree_t{ + "..": &_bintree_t{nil, map[string]*_bintree_t{ + "..": &_bintree_t{nil, map[string]*_bintree_t{ + "db": &_bintree_t{nil, map[string]*_bintree_t{ + "sqlite": &_bintree_t{nil, map[string]*_bintree_t{ + "migrations": &_bintree_t{nil, map[string]*_bintree_t{ + "metadata": &_bintree_t{nil, map[string]*_bintree_t{ + "1637447083.sql": &_bintree_t{db_sqlite_migrations_metadata_1637447083_sql, map[string]*_bintree_t{ + }}, + }}, + }}, + }}, + }}, + }}, + }}, +}} diff --git a/internal/db/sqlite/models/metadata/boil_main_test.go b/internal/db/sqlite/models/metadata/boil_main_test.go new file mode 100644 index 0000000..361c0f2 --- /dev/null +++ b/internal/db/sqlite/models/metadata/boil_main_test.go @@ -0,0 +1,119 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "database/sql" + "flag" + "fmt" + "math/rand" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/spf13/viper" + "github.com/volatiletech/sqlboiler/v4/boil" +) + +var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements") +var flagConfigFile = flag.String("test.config", "", "Overrides the default config") + +const outputDirDepth = 5 + +var ( + dbMain tester +) + +type tester interface { + setup() error + conn() (*sql.DB, error) + teardown() error +} + +func TestMain(m *testing.M) { + if dbMain == nil { + fmt.Println("no dbMain tester interface was ready") + os.Exit(-1) + } + + rand.Seed(time.Now().UnixNano()) + + flag.Parse() + + var err error + + // Load configuration + err = initViper() + if err != nil { + fmt.Println("unable to load config file") + os.Exit(-2) + } + + // Set DebugMode so we can see generated sql statements + boil.DebugMode = *flagDebugMode + + if err = dbMain.setup(); err != nil { + fmt.Println("Unable to execute setup:", err) + os.Exit(-4) + } + + conn, err := dbMain.conn() + if err != nil { + fmt.Println("failed to get connection:", err) + } + + var code int + boil.SetDB(conn) + code = m.Run() + + if err = dbMain.teardown(); err != nil { + fmt.Println("Unable to execute teardown:", err) + os.Exit(-5) + } + + os.Exit(code) +} + +func initViper() error { + if flagConfigFile != nil && *flagConfigFile != "" { + viper.SetConfigFile(*flagConfigFile) + if err := viper.ReadInConfig(); err != nil { + return err + } + return nil + } + + var err error + + viper.SetConfigName("sqlboiler") + + configHome := os.Getenv("XDG_CONFIG_HOME") + homePath := os.Getenv("HOME") + wd, err := os.Getwd() + if err != nil { + wd = strings.Repeat("../", outputDirDepth) + } else { + wd = wd + strings.Repeat("/..", outputDirDepth) + } + + configPaths := []string{wd} + if len(configHome) > 0 { + configPaths = append(configPaths, 
filepath.Join(configHome, "sqlboiler")) + } else { + configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler")) + } + + for _, p := range configPaths { + viper.AddConfigPath(p) + } + + // Ignore errors here, fall back to defaults and validation to provide errs + _ = viper.ReadInConfig() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + viper.AutomaticEnv() + + return nil +} diff --git a/internal/db/sqlite/models/metadata/boil_queries.go b/internal/db/sqlite/models/metadata/boil_queries.go new file mode 100644 index 0000000..c636a35 --- /dev/null +++ b/internal/db/sqlite/models/metadata/boil_queries.go @@ -0,0 +1,33 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "github.com/volatiletech/sqlboiler/v4/drivers" + "github.com/volatiletech/sqlboiler/v4/queries" + "github.com/volatiletech/sqlboiler/v4/queries/qm" +) + +var dialect = drivers.Dialect{ + LQ: 0x22, + RQ: 0x22, + + UseIndexPlaceholders: false, + UseLastInsertID: false, + UseSchema: false, + UseDefaultKeyword: true, + UseAutoColumns: false, + UseTopClause: false, + UseOutputClause: false, + UseCaseWhenExistsClause: false, +} + +// NewQuery initializes a new Query using the passed in QueryMods +func NewQuery(mods ...qm.QueryMod) *queries.Query { + q := &queries.Query{} + queries.SetDialect(q, &dialect) + qm.Apply(q, mods...) + + return q +} diff --git a/internal/db/sqlite/models/metadata/boil_queries_test.go b/internal/db/sqlite/models/metadata/boil_queries_test.go new file mode 100644 index 0000000..9b856e3 --- /dev/null +++ b/internal/db/sqlite/models/metadata/boil_queries_test.go @@ -0,0 +1,52 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "regexp" + + "github.com/volatiletech/sqlboiler/v4/boil" +) + +var dbNameRand *rand.Rand + +func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor { + if err != nil { + panic(fmt.Sprintf("Cannot create a transactor: %s", err)) + } + return transactor +} + +func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader { + return &fKeyDestroyer{ + reader: reader, + rgx: regex, + } +} + +type fKeyDestroyer struct { + reader io.Reader + buf *bytes.Buffer + rgx *regexp.Regexp +} + +func (f *fKeyDestroyer) Read(b []byte) (int, error) { + if f.buf == nil { + all, err := ioutil.ReadAll(f.reader) + if err != nil { + return 0, err + } + + all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1) + all = f.rgx.ReplaceAll(all, []byte{}) + f.buf = bytes.NewBuffer(all) + } + + return f.buf.Read(b) +} diff --git a/internal/db/sqlite/models/metadata/boil_suites_test.go b/internal/db/sqlite/models/metadata/boil_suites_test.go new file mode 100644 index 0000000..77db085 --- /dev/null +++ b/internal/db/sqlite/models/metadata/boil_suites_test.go @@ -0,0 +1,139 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import "testing" + +// This test suite runs each operation test in parallel. 
+// Example, if your database has 3 tables, the suite will run: +// table1, table2 and table3 Delete in parallel +// table1, table2 and table3 Insert in parallel, and so forth. +// It does NOT run each operation group in parallel. +// Separating the tests thusly grants avoidance of Postgres deadlocks. +func TestParent(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrations) + t.Run("Headers", testHeaders) +} + +func TestDelete(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsDelete) + t.Run("Headers", testHeadersDelete) +} + +func TestQueryDeleteAll(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsQueryDeleteAll) + t.Run("Headers", testHeadersQueryDeleteAll) +} + +func TestSliceDeleteAll(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsSliceDeleteAll) + t.Run("Headers", testHeadersSliceDeleteAll) +} + +func TestExists(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsExists) + t.Run("Headers", testHeadersExists) +} + +func TestFind(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsFind) + t.Run("Headers", testHeadersFind) +} + +func TestBind(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsBind) + t.Run("Headers", testHeadersBind) +} + +func TestOne(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsOne) + t.Run("Headers", testHeadersOne) +} + +func TestAll(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsAll) + t.Run("Headers", testHeadersAll) +} + +func TestCount(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsCount) + t.Run("Headers", testHeadersCount) +} + +func TestHooks(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsHooks) + t.Run("Headers", testHeadersHooks) +} + +func TestInsert(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsInsert) + t.Run("GorpMigrations", testGorpMigrationsInsertWhitelist) + t.Run("Headers", testHeadersInsert) + t.Run("Headers", testHeadersInsertWhitelist) +} + +// TestToOne tests cannot be run in parallel +// or deadlocks can occur. +func TestToOne(t *testing.T) {} + +// TestOneToOne tests cannot be run in parallel +// or deadlocks can occur. +func TestOneToOne(t *testing.T) {} + +// TestToMany tests cannot be run in parallel +// or deadlocks can occur. +func TestToMany(t *testing.T) {} + +// TestToOneSet tests cannot be run in parallel +// or deadlocks can occur. +func TestToOneSet(t *testing.T) {} + +// TestToOneRemove tests cannot be run in parallel +// or deadlocks can occur. +func TestToOneRemove(t *testing.T) {} + +// TestOneToOneSet tests cannot be run in parallel +// or deadlocks can occur. +func TestOneToOneSet(t *testing.T) {} + +// TestOneToOneRemove tests cannot be run in parallel +// or deadlocks can occur. +func TestOneToOneRemove(t *testing.T) {} + +// TestToManyAdd tests cannot be run in parallel +// or deadlocks can occur. +func TestToManyAdd(t *testing.T) {} + +// TestToManySet tests cannot be run in parallel +// or deadlocks can occur. +func TestToManySet(t *testing.T) {} + +// TestToManyRemove tests cannot be run in parallel +// or deadlocks can occur. 
+func TestToManyRemove(t *testing.T) {} + +func TestReload(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsReload) + t.Run("Headers", testHeadersReload) +} + +func TestReloadAll(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsReloadAll) + t.Run("Headers", testHeadersReloadAll) +} + +func TestSelect(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsSelect) + t.Run("Headers", testHeadersSelect) +} + +func TestUpdate(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsUpdate) + t.Run("Headers", testHeadersUpdate) +} + +func TestSliceUpdateAll(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsSliceUpdateAll) + t.Run("Headers", testHeadersSliceUpdateAll) +} diff --git a/internal/db/sqlite/models/metadata/boil_table_names.go b/internal/db/sqlite/models/metadata/boil_table_names.go new file mode 100644 index 0000000..c99ef26 --- /dev/null +++ b/internal/db/sqlite/models/metadata/boil_table_names.go @@ -0,0 +1,12 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +var TableNames = struct { + GorpMigrations string + Headers string +}{ + GorpMigrations: "gorp_migrations", + Headers: "headers", +} diff --git a/internal/db/sqlite/models/metadata/boil_types.go b/internal/db/sqlite/models/metadata/boil_types.go new file mode 100644 index 0000000..a55f017 --- /dev/null +++ b/internal/db/sqlite/models/metadata/boil_types.go @@ -0,0 +1,52 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "strconv" + + "github.com/friendsofgo/errors" + "github.com/volatiletech/sqlboiler/v4/boil" + "github.com/volatiletech/strmangle" +) + +// M type is for providing columns and column values to UpdateAll. +type M map[string]interface{} + +// ErrSyncFail occurs during insert when the record could not be retrieved in +// order to populate default value information. This usually happens when LastInsertId +// fails or there was a primary key configuration that was not resolvable. +var ErrSyncFail = errors.New("models: failed to synchronize data after insert") + +type insertCache struct { + query string + retQuery string + valueMapping []uint64 + retMapping []uint64 +} + +type updateCache struct { + query string + valueMapping []uint64 +} + +func makeCacheKey(cols boil.Columns, nzDefaults []string) string { + buf := strmangle.GetBuffer() + + buf.WriteString(strconv.Itoa(cols.Kind)) + for _, w := range cols.Cols { + buf.WriteString(w) + } + + if len(nzDefaults) != 0 { + buf.WriteByte('.') + } + for _, nz := range nzDefaults { + buf.WriteString(nz) + } + + str := buf.String() + strmangle.PutBuffer(buf) + return str +} diff --git a/internal/db/sqlite/models/metadata/gorp_migrations.go b/internal/db/sqlite/models/metadata/gorp_migrations.go new file mode 100644 index 0000000..8fc53f4 --- /dev/null +++ b/internal/db/sqlite/models/metadata/gorp_migrations.go @@ -0,0 +1,928 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package models + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/friendsofgo/errors" + "github.com/volatiletech/null/v8" + "github.com/volatiletech/sqlboiler/v4/boil" + "github.com/volatiletech/sqlboiler/v4/queries" + "github.com/volatiletech/sqlboiler/v4/queries/qm" + "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" + "github.com/volatiletech/strmangle" +) + +// GorpMigration is an object representing the database table. +type GorpMigration struct { + ID string `boil:"id" json:"id" toml:"id" yaml:"id"` + AppliedAt null.Time `boil:"applied_at" json:"applied_at,omitempty" toml:"applied_at" yaml:"applied_at,omitempty"` + + R *gorpMigrationR `boil:"-" json:"-" toml:"-" yaml:"-"` + L gorpMigrationL `boil:"-" json:"-" toml:"-" yaml:"-"` +} + +var GorpMigrationColumns = struct { + ID string + AppliedAt string +}{ + ID: "id", + AppliedAt: "applied_at", +} + +var GorpMigrationTableColumns = struct { + ID string + AppliedAt string +}{ + ID: "gorp_migrations.id", + AppliedAt: "gorp_migrations.applied_at", +} + +// Generated where + +type whereHelperstring struct{ field string } + +func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } +func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } +func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } +func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } +func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } +func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } +func (w whereHelperstring) IN(slice []string) qm.QueryMod { + values := make([]interface{}, 0, len(slice)) + for _, value := range slice { + values = append(values, value) + } + return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) +} +func (w whereHelperstring) NIN(slice []string) qm.QueryMod { + values := make([]interface{}, 0, len(slice)) + for _, value := range slice { + values = append(values, value) + } + return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) +} + +type whereHelpernull_Time struct{ field string } + +func (w whereHelpernull_Time) EQ(x null.Time) qm.QueryMod { + return qmhelper.WhereNullEQ(w.field, false, x) +} +func (w whereHelpernull_Time) NEQ(x null.Time) qm.QueryMod { + return qmhelper.WhereNullEQ(w.field, true, x) +} +func (w whereHelpernull_Time) LT(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LT, x) +} +func (w whereHelpernull_Time) LTE(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LTE, x) +} +func (w whereHelpernull_Time) GT(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GT, x) +} +func (w whereHelpernull_Time) GTE(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GTE, x) +} + +func (w whereHelpernull_Time) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } +func (w whereHelpernull_Time) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } + +var GorpMigrationWhere = struct { + ID whereHelperstring + AppliedAt whereHelpernull_Time +}{ + ID: whereHelperstring{field: "\"gorp_migrations\".\"id\""}, + AppliedAt: whereHelpernull_Time{field: "\"gorp_migrations\".\"applied_at\""}, +} + +// GorpMigrationRels is where relationship names are stored. 
+var GorpMigrationRels = struct { +}{} + +// gorpMigrationR is where relationships are stored. +type gorpMigrationR struct { +} + +// NewStruct creates a new relationship struct +func (*gorpMigrationR) NewStruct() *gorpMigrationR { + return &gorpMigrationR{} +} + +// gorpMigrationL is where Load methods for each relationship are stored. +type gorpMigrationL struct{} + +var ( + gorpMigrationAllColumns = []string{"id", "applied_at"} + gorpMigrationColumnsWithoutDefault = []string{"id", "applied_at"} + gorpMigrationColumnsWithDefault = []string{} + gorpMigrationPrimaryKeyColumns = []string{"id"} +) + +type ( + // GorpMigrationSlice is an alias for a slice of pointers to GorpMigration. + // This should almost always be used instead of []GorpMigration. + GorpMigrationSlice []*GorpMigration + // GorpMigrationHook is the signature for custom GorpMigration hook methods + GorpMigrationHook func(context.Context, boil.ContextExecutor, *GorpMigration) error + + gorpMigrationQuery struct { + *queries.Query + } +) + +// Cache for insert, update and upsert +var ( + gorpMigrationType = reflect.TypeOf(&GorpMigration{}) + gorpMigrationMapping = queries.MakeStructMapping(gorpMigrationType) + gorpMigrationPrimaryKeyMapping, _ = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, gorpMigrationPrimaryKeyColumns) + gorpMigrationInsertCacheMut sync.RWMutex + gorpMigrationInsertCache = make(map[string]insertCache) + gorpMigrationUpdateCacheMut sync.RWMutex + gorpMigrationUpdateCache = make(map[string]updateCache) + gorpMigrationUpsertCacheMut sync.RWMutex + gorpMigrationUpsertCache = make(map[string]insertCache) +) + +var ( + // Force time package dependency for automated UpdatedAt/CreatedAt. + _ = time.Second + // Force qmhelper dependency for where clause generation (which doesn't + // always happen) + _ = qmhelper.Where +) + +var gorpMigrationBeforeInsertHooks []GorpMigrationHook +var gorpMigrationBeforeUpdateHooks []GorpMigrationHook +var gorpMigrationBeforeDeleteHooks []GorpMigrationHook +var gorpMigrationBeforeUpsertHooks []GorpMigrationHook + +var gorpMigrationAfterInsertHooks []GorpMigrationHook +var gorpMigrationAfterSelectHooks []GorpMigrationHook +var gorpMigrationAfterUpdateHooks []GorpMigrationHook +var gorpMigrationAfterDeleteHooks []GorpMigrationHook +var gorpMigrationAfterUpsertHooks []GorpMigrationHook + +// doBeforeInsertHooks executes all "before insert" hooks. +func (o *GorpMigration) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationBeforeInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpdateHooks executes all "before Update" hooks. +func (o *GorpMigration) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationBeforeUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeDeleteHooks executes all "before Delete" hooks. +func (o *GorpMigration) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationBeforeDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpsertHooks executes all "before Upsert" hooks. 
+func (o *GorpMigration) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationBeforeUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterInsertHooks executes all "after Insert" hooks. +func (o *GorpMigration) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationAfterInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterSelectHooks executes all "after Select" hooks. +func (o *GorpMigration) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationAfterSelectHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpdateHooks executes all "after Update" hooks. +func (o *GorpMigration) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationAfterUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterDeleteHooks executes all "after Delete" hooks. +func (o *GorpMigration) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationAfterDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpsertHooks executes all "after Upsert" hooks. +func (o *GorpMigration) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range gorpMigrationAfterUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// AddGorpMigrationHook registers your hook function for all future operations. +func AddGorpMigrationHook(hookPoint boil.HookPoint, gorpMigrationHook GorpMigrationHook) { + switch hookPoint { + case boil.BeforeInsertHook: + gorpMigrationBeforeInsertHooks = append(gorpMigrationBeforeInsertHooks, gorpMigrationHook) + case boil.BeforeUpdateHook: + gorpMigrationBeforeUpdateHooks = append(gorpMigrationBeforeUpdateHooks, gorpMigrationHook) + case boil.BeforeDeleteHook: + gorpMigrationBeforeDeleteHooks = append(gorpMigrationBeforeDeleteHooks, gorpMigrationHook) + case boil.BeforeUpsertHook: + gorpMigrationBeforeUpsertHooks = append(gorpMigrationBeforeUpsertHooks, gorpMigrationHook) + case boil.AfterInsertHook: + gorpMigrationAfterInsertHooks = append(gorpMigrationAfterInsertHooks, gorpMigrationHook) + case boil.AfterSelectHook: + gorpMigrationAfterSelectHooks = append(gorpMigrationAfterSelectHooks, gorpMigrationHook) + case boil.AfterUpdateHook: + gorpMigrationAfterUpdateHooks = append(gorpMigrationAfterUpdateHooks, gorpMigrationHook) + case boil.AfterDeleteHook: + gorpMigrationAfterDeleteHooks = append(gorpMigrationAfterDeleteHooks, gorpMigrationHook) + case boil.AfterUpsertHook: + gorpMigrationAfterUpsertHooks = append(gorpMigrationAfterUpsertHooks, gorpMigrationHook) + } +} + +// One returns a single gorpMigration record from the query. 
+func (q gorpMigrationQuery) One(ctx context.Context, exec boil.ContextExecutor) (*GorpMigration, error) { + o := &GorpMigration{} + + queries.SetLimit(q.Query, 1) + + err := q.Bind(ctx, exec, o) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "models: failed to execute a one query for gorp_migrations") + } + + if err := o.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + + return o, nil +} + +// All returns all GorpMigration records from the query. +func (q gorpMigrationQuery) All(ctx context.Context, exec boil.ContextExecutor) (GorpMigrationSlice, error) { + var o []*GorpMigration + + err := q.Bind(ctx, exec, &o) + if err != nil { + return nil, errors.Wrap(err, "models: failed to assign all query results to GorpMigration slice") + } + + if len(gorpMigrationAfterSelectHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + } + } + + return o, nil +} + +// Count returns the count of all GorpMigration records in the query. +func (q gorpMigrationQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return 0, errors.Wrap(err, "models: failed to count gorp_migrations rows") + } + + return count, nil +} + +// Exists checks if the row exists in the table. +func (q gorpMigrationQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + queries.SetLimit(q.Query, 1) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return false, errors.Wrap(err, "models: failed to check if gorp_migrations exists") + } + + return count > 0, nil +} + +// GorpMigrations retrieves all the records using an executor. +func GorpMigrations(mods ...qm.QueryMod) gorpMigrationQuery { + mods = append(mods, qm.From("\"gorp_migrations\"")) + return gorpMigrationQuery{NewQuery(mods...)} +} + +// FindGorpMigration retrieves a single record by ID with an executor. +// If selectCols is empty Find will return all columns. +func FindGorpMigration(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*GorpMigration, error) { + gorpMigrationObj := &GorpMigration{} + + sel := "*" + if len(selectCols) > 0 { + sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") + } + query := fmt.Sprintf( + "select %s from \"gorp_migrations\" where \"id\"=?", sel, + ) + + q := queries.Raw(query, iD) + + err := q.Bind(ctx, exec, gorpMigrationObj) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "models: unable to select from gorp_migrations") + } + + if err = gorpMigrationObj.doAfterSelectHooks(ctx, exec); err != nil { + return gorpMigrationObj, err + } + + return gorpMigrationObj, nil +} + +// Insert a single record using an executor. +// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. 
+func (o *GorpMigration) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { + if o == nil { + return errors.New("models: no gorp_migrations provided for insertion") + } + + var err error + + if err := o.doBeforeInsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(gorpMigrationColumnsWithDefault, o) + + key := makeCacheKey(columns, nzDefaults) + gorpMigrationInsertCacheMut.RLock() + cache, cached := gorpMigrationInsertCache[key] + gorpMigrationInsertCacheMut.RUnlock() + + if !cached { + wl, returnColumns := columns.InsertColumnSet( + gorpMigrationAllColumns, + gorpMigrationColumnsWithDefault, + gorpMigrationColumnsWithoutDefault, + nzDefaults, + ) + + cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, wl) + if err != nil { + return err + } + cache.retMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, returnColumns) + if err != nil { + return err + } + if len(wl) != 0 { + cache.query = fmt.Sprintf("INSERT INTO \"gorp_migrations\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) + } else { + cache.query = "INSERT INTO \"gorp_migrations\" %sDEFAULT VALUES%s" + } + + var queryOutput, queryReturning string + + if len(cache.retMapping) != 0 { + queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) + } + + cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, cache.query) + fmt.Fprintln(writer, vals) + } + + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + + if err != nil { + return errors.Wrap(err, "models: unable to insert into gorp_migrations") + } + + if !cached { + gorpMigrationInsertCacheMut.Lock() + gorpMigrationInsertCache[key] = cache + gorpMigrationInsertCacheMut.Unlock() + } + + return o.doAfterInsertHooks(ctx, exec) +} + +// Update uses an executor to update the GorpMigration. +// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. +// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
+func (o *GorpMigration) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { + var err error + if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { + return 0, err + } + key := makeCacheKey(columns, nil) + gorpMigrationUpdateCacheMut.RLock() + cache, cached := gorpMigrationUpdateCache[key] + gorpMigrationUpdateCacheMut.RUnlock() + + if !cached { + wl := columns.UpdateColumnSet( + gorpMigrationAllColumns, + gorpMigrationPrimaryKeyColumns, + ) + + if !columns.IsWhitelist() { + wl = strmangle.SetComplement(wl, []string{"created_at"}) + } + if len(wl) == 0 { + return 0, errors.New("models: unable to update gorp_migrations, could not build whitelist") + } + + cache.query = fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 0, wl), + strmangle.WhereClause("\"", "\"", 0, gorpMigrationPrimaryKeyColumns), + ) + cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, append(wl, gorpMigrationPrimaryKeyColumns...)) + if err != nil { + return 0, err + } + } + + values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, cache.query) + fmt.Fprintln(writer, values) + } + var result sql.Result + result, err = exec.ExecContext(ctx, cache.query, values...) + if err != nil { + return 0, errors.Wrap(err, "models: unable to update gorp_migrations row") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by update for gorp_migrations") + } + + if !cached { + gorpMigrationUpdateCacheMut.Lock() + gorpMigrationUpdateCache[key] = cache + gorpMigrationUpdateCacheMut.Unlock() + } + + return rowsAff, o.doAfterUpdateHooks(ctx, exec) +} + +// UpdateAll updates all rows with the specified column values. +func (q gorpMigrationQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + queries.SetUpdate(q.Query, cols) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "models: unable to update all for gorp_migrations") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: unable to retrieve rows affected for gorp_migrations") + } + + return rowsAff, nil +} + +// UpdateAll updates all rows with the specified column values, using an executor. +func (o GorpMigrationSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + ln := int64(len(o)) + if ln == 0 { + return 0, nil + } + + if len(cols) == 0 { + return 0, errors.New("models: update all requires at least one column argument") + } + + colNames := make([]string, len(cols)) + args := make([]interface{}, len(cols)) + + i := 0 + for name, value := range cols { + colNames[i] = name + args[i] = value + i++ + } + + // Append all of the primary key values for each column + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := fmt.Sprintf("UPDATE \"gorp_migrations\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 0, colNames), + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o))) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, args...) 
+ } + result, err := exec.ExecContext(ctx, sql, args...) + if err != nil { + return 0, errors.Wrap(err, "models: unable to update all in gorpMigration slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all gorpMigration") + } + return rowsAff, nil +} + +// Delete deletes a single GorpMigration record with an executor. +// Delete will match against the primary key column to find the record to delete. +func (o *GorpMigration) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if o == nil { + return 0, errors.New("models: no GorpMigration provided for delete") + } + + if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), gorpMigrationPrimaryKeyMapping) + sql := "DELETE FROM \"gorp_migrations\" WHERE \"id\"=?" + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, args...) + } + result, err := exec.ExecContext(ctx, sql, args...) + if err != nil { + return 0, errors.Wrap(err, "models: unable to delete from gorp_migrations") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by delete for gorp_migrations") + } + + if err := o.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + return rowsAff, nil +} + +// DeleteAll deletes all matching rows. +func (q gorpMigrationQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if q.Query == nil { + return 0, errors.New("models: no gorpMigrationQuery provided for delete all") + } + + queries.SetDelete(q.Query) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "models: unable to delete all from gorp_migrations") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations") + } + + return rowsAff, nil +} + +// DeleteAll deletes all rows in the slice, using an executor. +func (o GorpMigrationSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if len(o) == 0 { + return 0, nil + } + + if len(gorpMigrationBeforeDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + var args []interface{} + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "DELETE FROM \"gorp_migrations\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(o)) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, args) + } + result, err := exec.ExecContext(ctx, sql, args...) 
+ if err != nil { + return 0, errors.Wrap(err, "models: unable to delete all from gorpMigration slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for gorp_migrations") + } + + if len(gorpMigrationAfterDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + return rowsAff, nil +} + +// Reload refetches the object from the database +// using the primary keys with an executor. +func (o *GorpMigration) Reload(ctx context.Context, exec boil.ContextExecutor) error { + ret, err := FindGorpMigration(ctx, exec, o.ID) + if err != nil { + return err + } + + *o = *ret + return nil +} + +// ReloadAll refetches every row with matching primary key column values +// and overwrites the original object slice with the newly updated slice. +func (o *GorpMigrationSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { + if o == nil || len(*o) == 0 { + return nil + } + + slice := GorpMigrationSlice{} + var args []interface{} + for _, obj := range *o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), gorpMigrationPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "SELECT \"gorp_migrations\".* FROM \"gorp_migrations\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, gorpMigrationPrimaryKeyColumns, len(*o)) + + q := queries.Raw(sql, args...) + + err := q.Bind(ctx, exec, &slice) + if err != nil { + return errors.Wrap(err, "models: unable to reload all in GorpMigrationSlice") + } + + *o = slice + + return nil +} + +// GorpMigrationExists checks if the GorpMigration row exists. +func GorpMigrationExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) { + var exists bool + sql := "select exists(select 1 from \"gorp_migrations\" where \"id\"=? limit 1)" + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, iD) + } + row := exec.QueryRowContext(ctx, sql, iD) + + err := row.Scan(&exists) + if err != nil { + return false, errors.Wrap(err, "models: unable to check if gorp_migrations exists") + } + + return exists, nil +} + +// Upsert attempts an insert using an executor, and does an update or ignore on conflict. +// See boil.Columns documentation for how to properly use updateColumns and insertColumns. 
+func (o *GorpMigration) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { + if o == nil { + return errors.New("models: no gorp_migrations provided for upsert") + } + + if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(gorpMigrationColumnsWithDefault, o) + + // Build cache key in-line uglily - mysql vs psql problems + buf := strmangle.GetBuffer() + if updateOnConflict { + buf.WriteByte('t') + } else { + buf.WriteByte('f') + } + buf.WriteByte('.') + for _, c := range conflictColumns { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(updateColumns.Kind)) + for _, c := range updateColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(insertColumns.Kind)) + for _, c := range insertColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + for _, c := range nzDefaults { + buf.WriteString(c) + } + key := buf.String() + strmangle.PutBuffer(buf) + + gorpMigrationUpsertCacheMut.RLock() + cache, cached := gorpMigrationUpsertCache[key] + gorpMigrationUpsertCacheMut.RUnlock() + + var err error + + if !cached { + insert, ret := insertColumns.InsertColumnSet( + gorpMigrationAllColumns, + gorpMigrationColumnsWithDefault, + gorpMigrationColumnsWithoutDefault, + nzDefaults, + ) + update := updateColumns.UpdateColumnSet( + gorpMigrationAllColumns, + gorpMigrationPrimaryKeyColumns, + ) + + if updateOnConflict && len(update) == 0 { + return errors.New("models: unable to upsert gorp_migrations, could not build update column list") + } + + conflict := conflictColumns + if len(conflict) == 0 { + conflict = make([]string, len(gorpMigrationPrimaryKeyColumns)) + copy(conflict, gorpMigrationPrimaryKeyColumns) + } + cache.query = buildUpsertQuerySQLite(dialect, "\"gorp_migrations\"", updateOnConflict, ret, update, conflict, insert) + + cache.valueMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, insert) + if err != nil { + return err + } + if len(ret) != 0 { + cache.retMapping, err = queries.BindMapping(gorpMigrationType, gorpMigrationMapping, ret) + if err != nil { + return err + } + } + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + var returns []interface{} + if len(cache.retMapping) != 0 { + returns = queries.PtrsFromMapping(value, cache.retMapping) + } + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, cache.query) + fmt.Fprintln(writer, vals) + } + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) + if err == sql.ErrNoRows { + err = nil // Postgres doesn't return anything when there's no update + } + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + if err != nil { + return errors.Wrap(err, "models: unable to upsert gorp_migrations") + } + + if !cached { + gorpMigrationUpsertCacheMut.Lock() + gorpMigrationUpsertCache[key] = cache + gorpMigrationUpsertCacheMut.Unlock() + } + + return o.doAfterUpsertHooks(ctx, exec) +} diff --git a/internal/db/sqlite/models/metadata/gorp_migrations_test.go b/internal/db/sqlite/models/metadata/gorp_migrations_test.go new file mode 100644 index 0000000..7937fe2 --- /dev/null +++ b/internal/db/sqlite/models/metadata/gorp_migrations_test.go @@ -0,0 +1,731 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). 
DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "bytes" + "context" + "reflect" + "testing" + + "github.com/volatiletech/randomize" + "github.com/volatiletech/sqlboiler/v4/boil" + "github.com/volatiletech/sqlboiler/v4/queries" + "github.com/volatiletech/strmangle" +) + +func testGorpMigrationsUpsert(t *testing.T) { + t.Parallel() + if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + // Attempt the INSERT side of an UPSERT + o := GorpMigration{} + if err = randomize.Struct(seed, &o, gorpMigrationDBTypes, true); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert GorpMigration: %s", err) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } + + // Attempt the UPDATE side of an UPSERT + if err = randomize.Struct(seed, &o, gorpMigrationDBTypes, false, gorpMigrationPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert GorpMigration: %s", err) + } + + count, err = GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } +} + +var ( + // Relationships sometimes use the reflection helper queries.Equal/queries.Assign + // so force a package dependency in case they don't. 
+ _ = queries.Equal +) + +func testGorpMigrations(t *testing.T) { + t.Parallel() + + query := GorpMigrations() + + if query.Query == nil { + t.Error("expected a query, got nothing") + } +} + +func testGorpMigrationsDelete(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := o.Delete(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testGorpMigrationsQueryDeleteAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := GorpMigrations().DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testGorpMigrationsSliceDeleteAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := GorpMigrationSlice{o} + + if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testGorpMigrationsExists(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + e, err := GorpMigrationExists(ctx, tx, o.ID) + if err != nil { + t.Errorf("Unable to check if GorpMigration exists: %s", err) + } + if !e { + t.Errorf("Expected GorpMigrationExists to return true, but got false.") + } +} + +func testGorpMigrationsFind(t *testing.T) { + 
t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + gorpMigrationFound, err := FindGorpMigration(ctx, tx, o.ID) + if err != nil { + t.Error(err) + } + + if gorpMigrationFound == nil { + t.Error("want a record, got nil") + } +} + +func testGorpMigrationsBind(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = GorpMigrations().Bind(ctx, tx, o); err != nil { + t.Error(err) + } +} + +func testGorpMigrationsOne(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if x, err := GorpMigrations().One(ctx, tx); err != nil { + t.Error(err) + } else if x == nil { + t.Error("expected to get a non nil record") + } +} + +func testGorpMigrationsAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + gorpMigrationOne := &GorpMigration{} + gorpMigrationTwo := &GorpMigration{} + if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := GorpMigrations().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 2 { + t.Error("want 2 records, got:", len(slice)) + } +} + +func testGorpMigrationsCount(t *testing.T) { + t.Parallel() + + var err error + seed := randomize.NewSeed() + gorpMigrationOne := &GorpMigration{} + gorpMigrationTwo := &GorpMigration{} + if err = randomize.Struct(seed, gorpMigrationOne, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + if err = randomize.Struct(seed, gorpMigrationTwo, gorpMigrationDBTypes, false, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := 
MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = gorpMigrationOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = gorpMigrationTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 2 { + t.Error("want 2 records, got:", count) + } +} + +func gorpMigrationBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func gorpMigrationAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *GorpMigration) error { + *o = GorpMigration{} + return nil +} + +func testGorpMigrationsHooks(t *testing.T) { + t.Parallel() + + var err error + + ctx := context.Background() + empty := &GorpMigration{} + o := &GorpMigration{} + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, false); err != nil { + t.Errorf("Unable to randomize GorpMigration object: %s", err) + } + + AddGorpMigrationHook(boil.BeforeInsertHook, gorpMigrationBeforeInsertHook) + if err = o.doBeforeInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o) + } + gorpMigrationBeforeInsertHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.AfterInsertHook, gorpMigrationAfterInsertHook) + if err = o.doAfterInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o) + } + gorpMigrationAfterInsertHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.AfterSelectHook, gorpMigrationAfterSelectHook) + if err = o.doAfterSelectHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterSelectHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o) + } + gorpMigrationAfterSelectHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.BeforeUpdateHook, gorpMigrationBeforeUpdateHook) + if err = o.doBeforeUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o) + } + 
gorpMigrationBeforeUpdateHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.AfterUpdateHook, gorpMigrationAfterUpdateHook) + if err = o.doAfterUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o) + } + gorpMigrationAfterUpdateHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.BeforeDeleteHook, gorpMigrationBeforeDeleteHook) + if err = o.doBeforeDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o) + } + gorpMigrationBeforeDeleteHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.AfterDeleteHook, gorpMigrationAfterDeleteHook) + if err = o.doAfterDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o) + } + gorpMigrationAfterDeleteHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.BeforeUpsertHook, gorpMigrationBeforeUpsertHook) + if err = o.doBeforeUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o) + } + gorpMigrationBeforeUpsertHooks = []GorpMigrationHook{} + + AddGorpMigrationHook(boil.AfterUpsertHook, gorpMigrationAfterUpsertHook) + if err = o.doAfterUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o) + } + gorpMigrationAfterUpsertHooks = []GorpMigrationHook{} +} + +func testGorpMigrationsInsert(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } +} + +func testGorpMigrationsInsertWhitelist(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Whitelist(gorpMigrationColumnsWithoutDefault...)); err != nil { + t.Error(err) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } +} + +func testGorpMigrationsReload(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to 
randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = o.Reload(ctx, tx); err != nil { + t.Error(err) + } +} + +func testGorpMigrationsReloadAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := GorpMigrationSlice{o} + + if err = slice.ReloadAll(ctx, tx); err != nil { + t.Error(err) + } +} + +func testGorpMigrationsSelect(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := GorpMigrations().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 1 { + t.Error("want one record, got:", len(slice)) + } +} + +var ( + gorpMigrationDBTypes = map[string]string{`ID`: `VARCHAR(255)`, `AppliedAt`: `DATETIME`} + _ = bytes.MinRead +) + +func testGorpMigrationsUpdate(t *testing.T) { + t.Parallel() + + if 0 == len(gorpMigrationPrimaryKeyColumns) { + t.Skip("Skipping table with no primary key columns") + } + if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only affect one row but affected", rowsAff) + } +} + +func testGorpMigrationsSliceUpdateAll(t *testing.T) { + t.Parallel() + + if len(gorpMigrationAllColumns) == len(gorpMigrationPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + o := &GorpMigration{} + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if 
err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := GorpMigrations().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, gorpMigrationDBTypes, true, gorpMigrationPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize GorpMigration struct: %s", err) + } + + // Remove Primary keys and unique columns from what we plan to update + var fields []string + if strmangle.StringSliceMatch(gorpMigrationAllColumns, gorpMigrationPrimaryKeyColumns) { + fields = gorpMigrationAllColumns + } else { + fields = strmangle.SetComplement( + gorpMigrationAllColumns, + gorpMigrationPrimaryKeyColumns, + ) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + typ := reflect.TypeOf(o).Elem() + n := typ.NumField() + + updateMap := M{} + for _, col := range fields { + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.Tag.Get("boil") == col { + updateMap[col] = value.Field(i).Interface() + } + } + } + + slice := GorpMigrationSlice{o} + if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("wanted one record updated but got", rowsAff) + } +} diff --git a/internal/db/sqlite/models/metadata/headers.go b/internal/db/sqlite/models/metadata/headers.go new file mode 100644 index 0000000..4ecb508 --- /dev/null +++ b/internal/db/sqlite/models/metadata/headers.go @@ -0,0 +1,1057 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/friendsofgo/errors" + "github.com/volatiletech/sqlboiler/v4/boil" + "github.com/volatiletech/sqlboiler/v4/queries" + "github.com/volatiletech/sqlboiler/v4/queries/qm" + "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" + "github.com/volatiletech/strmangle" +) + +// Header is an object representing the database table. 
+type Header struct { + Record int64 `boil:"record" json:"record" toml:"record" yaml:"record"` + Lastknownrecord int64 `boil:"lastknownrecord" json:"lastknownrecord" toml:"lastknownrecord" yaml:"lastknownrecord"` + Block int64 `boil:"block" json:"block" toml:"block" yaml:"block"` + Lastknownblock int64 `boil:"lastknownblock" json:"lastknownblock" toml:"lastknownblock" yaml:"lastknownblock"` + Deleted int64 `boil:"deleted" json:"deleted" toml:"deleted" yaml:"deleted"` + Typeflag int64 `boil:"typeflag" json:"typeflag" toml:"typeflag" yaml:"typeflag"` + Name string `boil:"name" json:"name" toml:"name" yaml:"name"` + Linkname string `boil:"linkname" json:"linkname" toml:"linkname" yaml:"linkname"` + Size int64 `boil:"size" json:"size" toml:"size" yaml:"size"` + Mode int64 `boil:"mode" json:"mode" toml:"mode" yaml:"mode"` + UID int64 `boil:"uid" json:"uid" toml:"uid" yaml:"uid"` + Gid int64 `boil:"gid" json:"gid" toml:"gid" yaml:"gid"` + Uname string `boil:"uname" json:"uname" toml:"uname" yaml:"uname"` + Gname string `boil:"gname" json:"gname" toml:"gname" yaml:"gname"` + Modtime time.Time `boil:"modtime" json:"modtime" toml:"modtime" yaml:"modtime"` + Accesstime time.Time `boil:"accesstime" json:"accesstime" toml:"accesstime" yaml:"accesstime"` + Changetime time.Time `boil:"changetime" json:"changetime" toml:"changetime" yaml:"changetime"` + Devmajor int64 `boil:"devmajor" json:"devmajor" toml:"devmajor" yaml:"devmajor"` + Devminor int64 `boil:"devminor" json:"devminor" toml:"devminor" yaml:"devminor"` + Paxrecords string `boil:"paxrecords" json:"paxrecords" toml:"paxrecords" yaml:"paxrecords"` + Format int64 `boil:"format" json:"format" toml:"format" yaml:"format"` + + R *headerR `boil:"-" json:"-" toml:"-" yaml:"-"` + L headerL `boil:"-" json:"-" toml:"-" yaml:"-"` +} + +var HeaderColumns = struct { + Record string + Lastknownrecord string + Block string + Lastknownblock string + Deleted string + Typeflag string + Name string + Linkname string + Size string + Mode string + UID string + Gid string + Uname string + Gname string + Modtime string + Accesstime string + Changetime string + Devmajor string + Devminor string + Paxrecords string + Format string +}{ + Record: "record", + Lastknownrecord: "lastknownrecord", + Block: "block", + Lastknownblock: "lastknownblock", + Deleted: "deleted", + Typeflag: "typeflag", + Name: "name", + Linkname: "linkname", + Size: "size", + Mode: "mode", + UID: "uid", + Gid: "gid", + Uname: "uname", + Gname: "gname", + Modtime: "modtime", + Accesstime: "accesstime", + Changetime: "changetime", + Devmajor: "devmajor", + Devminor: "devminor", + Paxrecords: "paxrecords", + Format: "format", +} + +var HeaderTableColumns = struct { + Record string + Lastknownrecord string + Block string + Lastknownblock string + Deleted string + Typeflag string + Name string + Linkname string + Size string + Mode string + UID string + Gid string + Uname string + Gname string + Modtime string + Accesstime string + Changetime string + Devmajor string + Devminor string + Paxrecords string + Format string +}{ + Record: "headers.record", + Lastknownrecord: "headers.lastknownrecord", + Block: "headers.block", + Lastknownblock: "headers.lastknownblock", + Deleted: "headers.deleted", + Typeflag: "headers.typeflag", + Name: "headers.name", + Linkname: "headers.linkname", + Size: "headers.size", + Mode: "headers.mode", + UID: "headers.uid", + Gid: "headers.gid", + Uname: "headers.uname", + Gname: "headers.gname", + Modtime: "headers.modtime", + Accesstime: "headers.accesstime", + Changetime: 
"headers.changetime", + Devmajor: "headers.devmajor", + Devminor: "headers.devminor", + Paxrecords: "headers.paxrecords", + Format: "headers.format", +} + +// Generated where + +type whereHelperint64 struct{ field string } + +func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } +func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } +func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } +func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } +func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } +func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } +func (w whereHelperint64) IN(slice []int64) qm.QueryMod { + values := make([]interface{}, 0, len(slice)) + for _, value := range slice { + values = append(values, value) + } + return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) +} +func (w whereHelperint64) NIN(slice []int64) qm.QueryMod { + values := make([]interface{}, 0, len(slice)) + for _, value := range slice { + values = append(values, value) + } + return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) +} + +type whereHelpertime_Time struct{ field string } + +func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.EQ, x) +} +func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.NEQ, x) +} +func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LT, x) +} +func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LTE, x) +} +func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GT, x) +} +func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GTE, x) +} + +var HeaderWhere = struct { + Record whereHelperint64 + Lastknownrecord whereHelperint64 + Block whereHelperint64 + Lastknownblock whereHelperint64 + Deleted whereHelperint64 + Typeflag whereHelperint64 + Name whereHelperstring + Linkname whereHelperstring + Size whereHelperint64 + Mode whereHelperint64 + UID whereHelperint64 + Gid whereHelperint64 + Uname whereHelperstring + Gname whereHelperstring + Modtime whereHelpertime_Time + Accesstime whereHelpertime_Time + Changetime whereHelpertime_Time + Devmajor whereHelperint64 + Devminor whereHelperint64 + Paxrecords whereHelperstring + Format whereHelperint64 +}{ + Record: whereHelperint64{field: "\"headers\".\"record\""}, + Lastknownrecord: whereHelperint64{field: "\"headers\".\"lastknownrecord\""}, + Block: whereHelperint64{field: "\"headers\".\"block\""}, + Lastknownblock: whereHelperint64{field: "\"headers\".\"lastknownblock\""}, + Deleted: whereHelperint64{field: "\"headers\".\"deleted\""}, + Typeflag: whereHelperint64{field: "\"headers\".\"typeflag\""}, + Name: whereHelperstring{field: "\"headers\".\"name\""}, + Linkname: whereHelperstring{field: "\"headers\".\"linkname\""}, + Size: whereHelperint64{field: "\"headers\".\"size\""}, + Mode: whereHelperint64{field: "\"headers\".\"mode\""}, + UID: whereHelperint64{field: "\"headers\".\"uid\""}, + Gid: whereHelperint64{field: "\"headers\".\"gid\""}, + Uname: whereHelperstring{field: "\"headers\".\"uname\""}, + Gname: whereHelperstring{field: 
"\"headers\".\"gname\""}, + Modtime: whereHelpertime_Time{field: "\"headers\".\"modtime\""}, + Accesstime: whereHelpertime_Time{field: "\"headers\".\"accesstime\""}, + Changetime: whereHelpertime_Time{field: "\"headers\".\"changetime\""}, + Devmajor: whereHelperint64{field: "\"headers\".\"devmajor\""}, + Devminor: whereHelperint64{field: "\"headers\".\"devminor\""}, + Paxrecords: whereHelperstring{field: "\"headers\".\"paxrecords\""}, + Format: whereHelperint64{field: "\"headers\".\"format\""}, +} + +// HeaderRels is where relationship names are stored. +var HeaderRels = struct { +}{} + +// headerR is where relationships are stored. +type headerR struct { +} + +// NewStruct creates a new relationship struct +func (*headerR) NewStruct() *headerR { + return &headerR{} +} + +// headerL is where Load methods for each relationship are stored. +type headerL struct{} + +var ( + headerAllColumns = []string{"record", "lastknownrecord", "block", "lastknownblock", "deleted", "typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "paxrecords", "format"} + headerColumnsWithoutDefault = []string{"record", "lastknownrecord", "block", "lastknownblock", "deleted", "typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "paxrecords", "format"} + headerColumnsWithDefault = []string{} + headerPrimaryKeyColumns = []string{"name"} +) + +type ( + // HeaderSlice is an alias for a slice of pointers to Header. + // This should almost always be used instead of []Header. + HeaderSlice []*Header + // HeaderHook is the signature for custom Header hook methods + HeaderHook func(context.Context, boil.ContextExecutor, *Header) error + + headerQuery struct { + *queries.Query + } +) + +// Cache for insert, update and upsert +var ( + headerType = reflect.TypeOf(&Header{}) + headerMapping = queries.MakeStructMapping(headerType) + headerPrimaryKeyMapping, _ = queries.BindMapping(headerType, headerMapping, headerPrimaryKeyColumns) + headerInsertCacheMut sync.RWMutex + headerInsertCache = make(map[string]insertCache) + headerUpdateCacheMut sync.RWMutex + headerUpdateCache = make(map[string]updateCache) + headerUpsertCacheMut sync.RWMutex + headerUpsertCache = make(map[string]insertCache) +) + +var ( + // Force time package dependency for automated UpdatedAt/CreatedAt. + _ = time.Second + // Force qmhelper dependency for where clause generation (which doesn't + // always happen) + _ = qmhelper.Where +) + +var headerBeforeInsertHooks []HeaderHook +var headerBeforeUpdateHooks []HeaderHook +var headerBeforeDeleteHooks []HeaderHook +var headerBeforeUpsertHooks []HeaderHook + +var headerAfterInsertHooks []HeaderHook +var headerAfterSelectHooks []HeaderHook +var headerAfterUpdateHooks []HeaderHook +var headerAfterDeleteHooks []HeaderHook +var headerAfterUpsertHooks []HeaderHook + +// doBeforeInsertHooks executes all "before insert" hooks. +func (o *Header) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerBeforeInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpdateHooks executes all "before Update" hooks. 
+func (o *Header) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerBeforeUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeDeleteHooks executes all "before Delete" hooks. +func (o *Header) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerBeforeDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpsertHooks executes all "before Upsert" hooks. +func (o *Header) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerBeforeUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterInsertHooks executes all "after Insert" hooks. +func (o *Header) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerAfterInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterSelectHooks executes all "after Select" hooks. +func (o *Header) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerAfterSelectHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpdateHooks executes all "after Update" hooks. +func (o *Header) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerAfterUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterDeleteHooks executes all "after Delete" hooks. +func (o *Header) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerAfterDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpsertHooks executes all "after Upsert" hooks. +func (o *Header) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range headerAfterUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// AddHeaderHook registers your hook function for all future operations. 
+func AddHeaderHook(hookPoint boil.HookPoint, headerHook HeaderHook) { + switch hookPoint { + case boil.BeforeInsertHook: + headerBeforeInsertHooks = append(headerBeforeInsertHooks, headerHook) + case boil.BeforeUpdateHook: + headerBeforeUpdateHooks = append(headerBeforeUpdateHooks, headerHook) + case boil.BeforeDeleteHook: + headerBeforeDeleteHooks = append(headerBeforeDeleteHooks, headerHook) + case boil.BeforeUpsertHook: + headerBeforeUpsertHooks = append(headerBeforeUpsertHooks, headerHook) + case boil.AfterInsertHook: + headerAfterInsertHooks = append(headerAfterInsertHooks, headerHook) + case boil.AfterSelectHook: + headerAfterSelectHooks = append(headerAfterSelectHooks, headerHook) + case boil.AfterUpdateHook: + headerAfterUpdateHooks = append(headerAfterUpdateHooks, headerHook) + case boil.AfterDeleteHook: + headerAfterDeleteHooks = append(headerAfterDeleteHooks, headerHook) + case boil.AfterUpsertHook: + headerAfterUpsertHooks = append(headerAfterUpsertHooks, headerHook) + } +} + +// One returns a single header record from the query. +func (q headerQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Header, error) { + o := &Header{} + + queries.SetLimit(q.Query, 1) + + err := q.Bind(ctx, exec, o) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "models: failed to execute a one query for headers") + } + + if err := o.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + + return o, nil +} + +// All returns all Header records from the query. +func (q headerQuery) All(ctx context.Context, exec boil.ContextExecutor) (HeaderSlice, error) { + var o []*Header + + err := q.Bind(ctx, exec, &o) + if err != nil { + return nil, errors.Wrap(err, "models: failed to assign all query results to Header slice") + } + + if len(headerAfterSelectHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + } + } + + return o, nil +} + +// Count returns the count of all Header records in the query. +func (q headerQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return 0, errors.Wrap(err, "models: failed to count headers rows") + } + + return count, nil +} + +// Exists checks if the row exists in the table. +func (q headerQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + queries.SetLimit(q.Query, 1) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return false, errors.Wrap(err, "models: failed to check if headers exists") + } + + return count > 0, nil +} + +// Headers retrieves all the records using an executor. +func Headers(mods ...qm.QueryMod) headerQuery { + mods = append(mods, qm.From("\"headers\"")) + return headerQuery{NewQuery(mods...)} +} + +// FindHeader retrieves a single record by ID with an executor. +// If selectCols is empty Find will return all columns. 
+func FindHeader(ctx context.Context, exec boil.ContextExecutor, name string, selectCols ...string) (*Header, error) { + headerObj := &Header{} + + sel := "*" + if len(selectCols) > 0 { + sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") + } + query := fmt.Sprintf( + "select %s from \"headers\" where \"name\"=?", sel, + ) + + q := queries.Raw(query, name) + + err := q.Bind(ctx, exec, headerObj) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "models: unable to select from headers") + } + + if err = headerObj.doAfterSelectHooks(ctx, exec); err != nil { + return headerObj, err + } + + return headerObj, nil +} + +// Insert a single record using an executor. +// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. +func (o *Header) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { + if o == nil { + return errors.New("models: no headers provided for insertion") + } + + var err error + + if err := o.doBeforeInsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(headerColumnsWithDefault, o) + + key := makeCacheKey(columns, nzDefaults) + headerInsertCacheMut.RLock() + cache, cached := headerInsertCache[key] + headerInsertCacheMut.RUnlock() + + if !cached { + wl, returnColumns := columns.InsertColumnSet( + headerAllColumns, + headerColumnsWithDefault, + headerColumnsWithoutDefault, + nzDefaults, + ) + + cache.valueMapping, err = queries.BindMapping(headerType, headerMapping, wl) + if err != nil { + return err + } + cache.retMapping, err = queries.BindMapping(headerType, headerMapping, returnColumns) + if err != nil { + return err + } + if len(wl) != 0 { + cache.query = fmt.Sprintf("INSERT INTO \"headers\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) + } else { + cache.query = "INSERT INTO \"headers\" %sDEFAULT VALUES%s" + } + + var queryOutput, queryReturning string + + if len(cache.retMapping) != 0 { + queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) + } + + cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, cache.query) + fmt.Fprintln(writer, vals) + } + + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + + if err != nil { + return errors.Wrap(err, "models: unable to insert into headers") + } + + if !cached { + headerInsertCacheMut.Lock() + headerInsertCache[key] = cache + headerInsertCacheMut.Unlock() + } + + return o.doAfterInsertHooks(ctx, exec) +} + +// Update uses an executor to update the Header. +// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. +// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
+func (o *Header) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { + var err error + if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { + return 0, err + } + key := makeCacheKey(columns, nil) + headerUpdateCacheMut.RLock() + cache, cached := headerUpdateCache[key] + headerUpdateCacheMut.RUnlock() + + if !cached { + wl := columns.UpdateColumnSet( + headerAllColumns, + headerPrimaryKeyColumns, + ) + + if !columns.IsWhitelist() { + wl = strmangle.SetComplement(wl, []string{"created_at"}) + } + if len(wl) == 0 { + return 0, errors.New("models: unable to update headers, could not build whitelist") + } + + cache.query = fmt.Sprintf("UPDATE \"headers\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 0, wl), + strmangle.WhereClause("\"", "\"", 0, headerPrimaryKeyColumns), + ) + cache.valueMapping, err = queries.BindMapping(headerType, headerMapping, append(wl, headerPrimaryKeyColumns...)) + if err != nil { + return 0, err + } + } + + values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, cache.query) + fmt.Fprintln(writer, values) + } + var result sql.Result + result, err = exec.ExecContext(ctx, cache.query, values...) + if err != nil { + return 0, errors.Wrap(err, "models: unable to update headers row") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by update for headers") + } + + if !cached { + headerUpdateCacheMut.Lock() + headerUpdateCache[key] = cache + headerUpdateCacheMut.Unlock() + } + + return rowsAff, o.doAfterUpdateHooks(ctx, exec) +} + +// UpdateAll updates all rows with the specified column values. +func (q headerQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + queries.SetUpdate(q.Query, cols) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "models: unable to update all for headers") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: unable to retrieve rows affected for headers") + } + + return rowsAff, nil +} + +// UpdateAll updates all rows with the specified column values, using an executor. +func (o HeaderSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + ln := int64(len(o)) + if ln == 0 { + return 0, nil + } + + if len(cols) == 0 { + return 0, errors.New("models: update all requires at least one column argument") + } + + colNames := make([]string, len(cols)) + args := make([]interface{}, len(cols)) + + i := 0 + for name, value := range cols { + colNames[i] = name + args[i] = value + i++ + } + + // Append all of the primary key values for each column + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), headerPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := fmt.Sprintf("UPDATE \"headers\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 0, colNames), + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(o))) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, args...) + } + result, err := exec.ExecContext(ctx, sql, args...) 
+ if err != nil { + return 0, errors.Wrap(err, "models: unable to update all in header slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all header") + } + return rowsAff, nil +} + +// Delete deletes a single Header record with an executor. +// Delete will match against the primary key column to find the record to delete. +func (o *Header) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if o == nil { + return 0, errors.New("models: no Header provided for delete") + } + + if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), headerPrimaryKeyMapping) + sql := "DELETE FROM \"headers\" WHERE \"name\"=?" + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, args...) + } + result, err := exec.ExecContext(ctx, sql, args...) + if err != nil { + return 0, errors.Wrap(err, "models: unable to delete from headers") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by delete for headers") + } + + if err := o.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + return rowsAff, nil +} + +// DeleteAll deletes all matching rows. +func (q headerQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if q.Query == nil { + return 0, errors.New("models: no headerQuery provided for delete all") + } + + queries.SetDelete(q.Query) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "models: unable to delete all from headers") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for headers") + } + + return rowsAff, nil +} + +// DeleteAll deletes all rows in the slice, using an executor. +func (o HeaderSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if len(o) == 0 { + return 0, nil + } + + if len(headerBeforeDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + var args []interface{} + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), headerPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "DELETE FROM \"headers\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(o)) + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, args) + } + result, err := exec.ExecContext(ctx, sql, args...) + if err != nil { + return 0, errors.Wrap(err, "models: unable to delete all from header slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for headers") + } + + if len(headerAfterDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + return rowsAff, nil +} + +// Reload refetches the object from the database +// using the primary keys with an executor. 
+func (o *Header) Reload(ctx context.Context, exec boil.ContextExecutor) error { + ret, err := FindHeader(ctx, exec, o.Name) + if err != nil { + return err + } + + *o = *ret + return nil +} + +// ReloadAll refetches every row with matching primary key column values +// and overwrites the original object slice with the newly updated slice. +func (o *HeaderSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { + if o == nil || len(*o) == 0 { + return nil + } + + slice := HeaderSlice{} + var args []interface{} + for _, obj := range *o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), headerPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "SELECT \"headers\".* FROM \"headers\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, headerPrimaryKeyColumns, len(*o)) + + q := queries.Raw(sql, args...) + + err := q.Bind(ctx, exec, &slice) + if err != nil { + return errors.Wrap(err, "models: unable to reload all in HeaderSlice") + } + + *o = slice + + return nil +} + +// HeaderExists checks if the Header row exists. +func HeaderExists(ctx context.Context, exec boil.ContextExecutor, name string) (bool, error) { + var exists bool + sql := "select exists(select 1 from \"headers\" where \"name\"=? limit 1)" + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, sql) + fmt.Fprintln(writer, name) + } + row := exec.QueryRowContext(ctx, sql, name) + + err := row.Scan(&exists) + if err != nil { + return false, errors.Wrap(err, "models: unable to check if headers exists") + } + + return exists, nil +} + +// Upsert attempts an insert using an executor, and does an update or ignore on conflict. +// See boil.Columns documentation for how to properly use updateColumns and insertColumns. 
+func (o *Header) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { + if o == nil { + return errors.New("models: no headers provided for upsert") + } + + if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(headerColumnsWithDefault, o) + + // Build cache key in-line uglily - mysql vs psql problems + buf := strmangle.GetBuffer() + if updateOnConflict { + buf.WriteByte('t') + } else { + buf.WriteByte('f') + } + buf.WriteByte('.') + for _, c := range conflictColumns { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(updateColumns.Kind)) + for _, c := range updateColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(insertColumns.Kind)) + for _, c := range insertColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + for _, c := range nzDefaults { + buf.WriteString(c) + } + key := buf.String() + strmangle.PutBuffer(buf) + + headerUpsertCacheMut.RLock() + cache, cached := headerUpsertCache[key] + headerUpsertCacheMut.RUnlock() + + var err error + + if !cached { + insert, ret := insertColumns.InsertColumnSet( + headerAllColumns, + headerColumnsWithDefault, + headerColumnsWithoutDefault, + nzDefaults, + ) + update := updateColumns.UpdateColumnSet( + headerAllColumns, + headerPrimaryKeyColumns, + ) + + if updateOnConflict && len(update) == 0 { + return errors.New("models: unable to upsert headers, could not build update column list") + } + + conflict := conflictColumns + if len(conflict) == 0 { + conflict = make([]string, len(headerPrimaryKeyColumns)) + copy(conflict, headerPrimaryKeyColumns) + } + cache.query = buildUpsertQuerySQLite(dialect, "\"headers\"", updateOnConflict, ret, update, conflict, insert) + + cache.valueMapping, err = queries.BindMapping(headerType, headerMapping, insert) + if err != nil { + return err + } + if len(ret) != 0 { + cache.retMapping, err = queries.BindMapping(headerType, headerMapping, ret) + if err != nil { + return err + } + } + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + var returns []interface{} + if len(cache.retMapping) != 0 { + returns = queries.PtrsFromMapping(value, cache.retMapping) + } + + if boil.IsDebug(ctx) { + writer := boil.DebugWriterFrom(ctx) + fmt.Fprintln(writer, cache.query) + fmt.Fprintln(writer, vals) + } + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) + if err == sql.ErrNoRows { + err = nil // Postgres doesn't return anything when there's no update + } + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + if err != nil { + return errors.Wrap(err, "models: unable to upsert headers") + } + + if !cached { + headerUpsertCacheMut.Lock() + headerUpsertCache[key] = cache + headerUpsertCacheMut.Unlock() + } + + return o.doAfterUpsertHooks(ctx, exec) +} diff --git a/internal/db/sqlite/models/metadata/headers_test.go b/internal/db/sqlite/models/metadata/headers_test.go new file mode 100644 index 0000000..75f83cf --- /dev/null +++ b/internal/db/sqlite/models/metadata/headers_test.go @@ -0,0 +1,731 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
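The Header model generated above exposes the usual SQLBoiler surface: FindHeader keyed on the "name" primary key, the Headers() query builder with One/All/Count/Exists, Insert/Update/Upsert/Delete, and the HeaderWhere helpers for type-safe filters. A minimal sketch of driving that API from application code follows; the module import path and the database DSN are assumptions, and only a couple of struct fields are populated for brevity.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
	"github.com/volatiletech/sqlboiler/v4/boil"

	models "example.com/project/internal/db/sqlite/models/metadata" // hypothetical module path
)

func main() {
	// Hypothetical DSN; the generated test harness opens SQLite with the same _loc=UTC form.
	db, err := sql.Open("sqlite3", "file:metadata.db?_loc=UTC")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Insert a row, letting SQLBoiler infer the column list from the struct.
	h := &models.Header{Name: "docs/readme.md", Size: 1024}
	if err := h.Insert(ctx, db, boil.Infer()); err != nil {
		log.Fatal(err)
	}

	// Fetch it back by primary key ("name") ...
	found, err := models.FindHeader(ctx, db, "docs/readme.md")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("size:", found.Size)

	// ... or count rows through the generated where helpers.
	n, err := models.Headers(models.HeaderWhere.Deleted.EQ(0)).Count(ctx, db)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("live headers:", n)
}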
+ +package models + +import ( + "bytes" + "context" + "reflect" + "testing" + + "github.com/volatiletech/randomize" + "github.com/volatiletech/sqlboiler/v4/boil" + "github.com/volatiletech/sqlboiler/v4/queries" + "github.com/volatiletech/strmangle" +) + +func testHeadersUpsert(t *testing.T) { + t.Parallel() + if len(headerAllColumns) == len(headerPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + // Attempt the INSERT side of an UPSERT + o := Header{} + if err = randomize.Struct(seed, &o, headerDBTypes, true); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert Header: %s", err) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } + + // Attempt the UPDATE side of an UPSERT + if err = randomize.Struct(seed, &o, headerDBTypes, false, headerPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert Header: %s", err) + } + + count, err = Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } +} + +var ( + // Relationships sometimes use the reflection helper queries.Equal/queries.Assign + // so force a package dependency in case they don't. + _ = queries.Equal +) + +func testHeaders(t *testing.T) { + t.Parallel() + + query := Headers() + + if query.Query == nil { + t.Error("expected a query, got nothing") + } +} + +func testHeadersDelete(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := o.Delete(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testHeadersQueryDeleteAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := Headers().DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testHeadersSliceDeleteAll(t *testing.T) { + t.Parallel() + + seed := 
randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := HeaderSlice{o} + + if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testHeadersExists(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + e, err := HeaderExists(ctx, tx, o.Name) + if err != nil { + t.Errorf("Unable to check if Header exists: %s", err) + } + if !e { + t.Errorf("Expected HeaderExists to return true, but got false.") + } +} + +func testHeadersFind(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + headerFound, err := FindHeader(ctx, tx, o.Name) + if err != nil { + t.Error(err) + } + + if headerFound == nil { + t.Error("want a record, got nil") + } +} + +func testHeadersBind(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = Headers().Bind(ctx, tx, o); err != nil { + t.Error(err) + } +} + +func testHeadersOne(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if x, err := Headers().One(ctx, tx); err != nil { + t.Error(err) + } else if x == nil { + t.Error("expected to get a non nil record") + } +} + +func testHeadersAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + headerOne := &Header{} + headerTwo := &Header{} + if err = randomize.Struct(seed, headerOne, headerDBTypes, false, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + if err = 
randomize.Struct(seed, headerTwo, headerDBTypes, false, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = headerOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = headerTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := Headers().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 2 { + t.Error("want 2 records, got:", len(slice)) + } +} + +func testHeadersCount(t *testing.T) { + t.Parallel() + + var err error + seed := randomize.NewSeed() + headerOne := &Header{} + headerTwo := &Header{} + if err = randomize.Struct(seed, headerOne, headerDBTypes, false, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + if err = randomize.Struct(seed, headerTwo, headerDBTypes, false, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = headerOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = headerTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 2 { + t.Error("want 2 records, got:", count) + } +} + +func headerBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func headerAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Header) error { + *o = Header{} + return nil +} + +func testHeadersHooks(t *testing.T) { + t.Parallel() + + var err error + + ctx := context.Background() + empty := &Header{} + o := &Header{} + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, o, headerDBTypes, false); err != nil { + t.Errorf("Unable to randomize Header object: %s", err) + } + + AddHeaderHook(boil.BeforeInsertHook, headerBeforeInsertHook) + if err = o.doBeforeInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o) + } + headerBeforeInsertHooks = []HeaderHook{} + + AddHeaderHook(boil.AfterInsertHook, headerAfterInsertHook) + if err = o.doAfterInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + 
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o) + } + headerAfterInsertHooks = []HeaderHook{} + + AddHeaderHook(boil.AfterSelectHook, headerAfterSelectHook) + if err = o.doAfterSelectHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterSelectHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o) + } + headerAfterSelectHooks = []HeaderHook{} + + AddHeaderHook(boil.BeforeUpdateHook, headerBeforeUpdateHook) + if err = o.doBeforeUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o) + } + headerBeforeUpdateHooks = []HeaderHook{} + + AddHeaderHook(boil.AfterUpdateHook, headerAfterUpdateHook) + if err = o.doAfterUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o) + } + headerAfterUpdateHooks = []HeaderHook{} + + AddHeaderHook(boil.BeforeDeleteHook, headerBeforeDeleteHook) + if err = o.doBeforeDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o) + } + headerBeforeDeleteHooks = []HeaderHook{} + + AddHeaderHook(boil.AfterDeleteHook, headerAfterDeleteHook) + if err = o.doAfterDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o) + } + headerAfterDeleteHooks = []HeaderHook{} + + AddHeaderHook(boil.BeforeUpsertHook, headerBeforeUpsertHook) + if err = o.doBeforeUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o) + } + headerBeforeUpsertHooks = []HeaderHook{} + + AddHeaderHook(boil.AfterUpsertHook, headerAfterUpsertHook) + if err = o.doAfterUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o) + } + headerAfterUpsertHooks = []HeaderHook{} +} + +func testHeadersInsert(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } +} + +func testHeadersInsertWhitelist(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = 
tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Whitelist(headerColumnsWithoutDefault...)); err != nil { + t.Error(err) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } +} + +func testHeadersReload(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = o.Reload(ctx, tx); err != nil { + t.Error(err) + } +} + +func testHeadersReloadAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := HeaderSlice{o} + + if err = slice.ReloadAll(ctx, tx); err != nil { + t.Error(err) + } +} + +func testHeadersSelect(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := Headers().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 1 { + t.Error("want one record, got:", len(slice)) + } +} + +var ( + headerDBTypes = map[string]string{`Record`: `INTEGER`, `Lastknownrecord`: `INTEGER`, `Block`: `INTEGER`, `Lastknownblock`: `INTEGER`, `Deleted`: `INTEGER`, `Typeflag`: `INTEGER`, `Name`: `TEXT`, `Linkname`: `TEXT`, `Size`: `INTEGER`, `Mode`: `INTEGER`, `UID`: `INTEGER`, `Gid`: `INTEGER`, `Uname`: `TEXT`, `Gname`: `TEXT`, `Modtime`: `DATE`, `Accesstime`: `DATE`, `Changetime`: `DATE`, `Devmajor`: `INTEGER`, `Devminor`: `INTEGER`, `Paxrecords`: `TEXT`, `Format`: `INTEGER`} + _ = bytes.MinRead +) + +func testHeadersUpdate(t *testing.T) { + t.Parallel() + + if 0 == len(headerPrimaryKeyColumns) { + t.Skip("Skipping table with no primary key columns") + } + if len(headerAllColumns) == len(headerPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + if rowsAff, err := 
o.Update(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only affect one row but affected", rowsAff) + } +} + +func testHeadersSliceUpdateAll(t *testing.T) { + t.Parallel() + + if len(headerAllColumns) == len(headerPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + o := &Header{} + if err = randomize.Struct(seed, o, headerDBTypes, true, headerColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Headers().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, headerDBTypes, true, headerPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Header struct: %s", err) + } + + // Remove Primary keys and unique columns from what we plan to update + var fields []string + if strmangle.StringSliceMatch(headerAllColumns, headerPrimaryKeyColumns) { + fields = headerAllColumns + } else { + fields = strmangle.SetComplement( + headerAllColumns, + headerPrimaryKeyColumns, + ) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + typ := reflect.TypeOf(o).Elem() + n := typ.NumField() + + updateMap := M{} + for _, col := range fields { + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.Tag.Get("boil") == col { + updateMap[col] = value.Field(i).Interface() + } + } + } + + slice := HeaderSlice{o} + if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("wanted one record updated but got", rowsAff) + } +} diff --git a/internal/db/sqlite/models/metadata/sqlite3_main_test.go b/internal/db/sqlite/models/metadata/sqlite3_main_test.go new file mode 100644 index 0000000..923cc9f --- /dev/null +++ b/internal/db/sqlite/models/metadata/sqlite3_main_test.go @@ -0,0 +1,93 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
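The hook slices exercised by testHeadersHooks above are the intended extension point of the generated model: AddHeaderHook registers a HeaderHook for one of the boil.*Hook points, and every later operation on Header runs it. A minimal sketch, assuming the same hypothetical import path; the path-normalization rule is invented purely for illustration.

package hooks // hypothetical wiring package

import (
	"context"
	"strings"

	"github.com/volatiletech/sqlboiler/v4/boil"

	models "example.com/project/internal/db/sqlite/models/metadata" // hypothetical module path
)

// trimLeadingSlash is a hypothetical before-insert hook that normalizes the
// "name" primary-key column before a Header row is written.
func trimLeadingSlash(ctx context.Context, exec boil.ContextExecutor, h *models.Header) error {
	h.Name = strings.TrimPrefix(h.Name, "/")
	return nil
}

func init() {
	// Registered once, the hook applies to every subsequent Header.Insert in the process.
	models.AddHeaderHook(boil.BeforeInsertHook, trimLeadingSlash)
}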
+ +package models + +import ( + "database/sql" + "fmt" + "io" + "math/rand" + "os" + "os/exec" + "path/filepath" + "regexp" + + _ "github.com/mattn/go-sqlite3" + "github.com/pkg/errors" + "github.com/spf13/viper" +) + +var rgxSQLitekey = regexp.MustCompile(`(?mi)((,\n)?\s+foreign key.*?\n)+`) + +type sqliteTester struct { + dbConn *sql.DB + + dbName string + testDBName string +} + +func init() { + dbMain = &sqliteTester{} +} + +func (s *sqliteTester) setup() error { + var err error + + s.dbName = viper.GetString("sqlite3.dbname") + if len(s.dbName) == 0 { + return errors.New("no dbname specified") + } + + s.testDBName = filepath.Join(os.TempDir(), fmt.Sprintf("boil-sqlite3-%d.sql", rand.Int())) + + dumpCmd := exec.Command("sqlite3", "-cmd", ".dump", s.dbName) + createCmd := exec.Command("sqlite3", s.testDBName) + + r, w := io.Pipe() + dumpCmd.Stdout = w + createCmd.Stdin = newFKeyDestroyer(rgxSQLitekey, r) + + if err = dumpCmd.Start(); err != nil { + return errors.Wrap(err, "failed to start sqlite3 dump command") + } + if err = createCmd.Start(); err != nil { + return errors.Wrap(err, "failed to start sqlite3 create command") + } + + if err = dumpCmd.Wait(); err != nil { + fmt.Println(err) + return errors.Wrap(err, "failed to wait for sqlite3 dump command") + } + + w.Close() // After dumpCmd is done, close the write end of the pipe + + if err = createCmd.Wait(); err != nil { + fmt.Println(err) + return errors.Wrap(err, "failed to wait for sqlite3 create command") + } + + return nil +} + +func (s *sqliteTester) teardown() error { + if s.dbConn != nil { + s.dbConn.Close() + } + + return os.Remove(s.testDBName) +} + +func (s *sqliteTester) conn() (*sql.DB, error) { + if s.dbConn != nil { + return s.dbConn, nil + } + + var err error + s.dbConn, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?_loc=UTC", s.testDBName)) + if err != nil { + return nil, err + } + + return s.dbConn, nil +} diff --git a/internal/db/sqlite/models/metadata/sqlite3_suites_test.go b/internal/db/sqlite/models/metadata/sqlite3_suites_test.go new file mode 100644 index 0000000..609aac1 --- /dev/null +++ b/internal/db/sqlite/models/metadata/sqlite3_suites_test.go @@ -0,0 +1,12 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import "testing" + +func TestUpsert(t *testing.T) { + t.Run("GorpMigrations", testGorpMigrationsUpsert) + + t.Run("Headers", testHeadersUpsert) +} diff --git a/internal/db/sqlite/models/metadata/sqlite_upsert.go b/internal/db/sqlite/models/metadata/sqlite_upsert.go new file mode 100644 index 0000000..98c2a69 --- /dev/null +++ b/internal/db/sqlite/models/metadata/sqlite_upsert.go @@ -0,0 +1,61 @@ +// Code generated by SQLBoiler 4.8.3 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "fmt" + "strings" + + "github.com/volatiletech/sqlboiler/v4/drivers" + "github.com/volatiletech/strmangle" +) + +// buildUpsertQuerySQLite builds a SQL statement string using the upsertData provided. 
+func buildUpsertQuerySQLite(dia drivers.Dialect, tableName string, updateOnConflict bool, ret, update, conflict, whitelist []string) string { + conflict = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, conflict) + whitelist = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, whitelist) + ret = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, ret) + + buf := strmangle.GetBuffer() + defer strmangle.PutBuffer(buf) + + columns := "DEFAULT VALUES" + if len(whitelist) != 0 { + columns = fmt.Sprintf("(%s) VALUES (%s)", + strings.Join(whitelist, ", "), + strmangle.Placeholders(dia.UseIndexPlaceholders, len(whitelist), 1, 1)) + } + + fmt.Fprintf( + buf, + "INSERT INTO %s %s ON CONFLICT ", + tableName, + columns, + ) + + if !updateOnConflict || len(update) == 0 { + buf.WriteString("DO NOTHING") + } else { + buf.WriteByte('(') + buf.WriteString(strings.Join(conflict, ", ")) + buf.WriteString(") DO UPDATE SET ") + + for i, v := range update { + if i != 0 { + buf.WriteByte(',') + } + quoted := strmangle.IdentQuote(dia.LQ, dia.RQ, v) + buf.WriteString(quoted) + buf.WriteString(" = EXCLUDED.") + buf.WriteString(quoted) + } + } + + if len(ret) != 0 { + buf.WriteString(" RETURNING ") + buf.WriteString(strings.Join(ret, ", ")) + } + + return buf.String() +}
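buildUpsertQuerySQLite is the builder behind Header.Upsert in headers.go: when no conflict columns are passed it falls back to the primary key, and the update list is rewritten as "col" = EXCLUDED."col" pairs. A minimal sketch of exercising it through the generated method; the import path and DSN are assumptions, and the SQL in the comment is the approximate shape the builder emits rather than captured output.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
	"github.com/volatiletech/sqlboiler/v4/boil"

	models "example.com/project/internal/db/sqlite/models/metadata" // hypothetical module path
)

func upsertHeader(ctx context.Context, db *sql.DB, h *models.Header) error {
	// updateOnConflict=true with a nil conflict list defaults to the primary
	// key ("name"), so the statement looks roughly like:
	//   INSERT INTO "headers" ("record", ..., "format") VALUES (?, ..., ?)
	//   ON CONFLICT ("name") DO UPDATE SET "record" = EXCLUDED."record", ...
	return h.Upsert(ctx, db, true, nil, boil.Infer(), boil.Infer())
}

func main() {
	db, err := sql.Open("sqlite3", "file:metadata.db?_loc=UTC") // hypothetical DSN
	if err != nil {
		log.Fatal(err)
	}
	h := &models.Header{Name: "docs/readme.md", Size: 2048}
	if err := upsertHeader(context.Background(), db, h); err != nil {
		log.Fatal(err)
	}
}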