From 619e0da26818861839698adf267a246c2105f6bc Mon Sep 17 00:00:00 2001
From: Eric Fritz
Date: Thu, 19 Sep 2024 21:19:25 -0500
Subject: [PATCH] Squash commits.

---
 .envrc                                        |   8 +
 LICENSE                                       |   2 +-
 README.md                                     |  27 +-
 batch.go                                      |  71 ++
 batch_options.go                              |  38 +
 batch_queries.go                              | 102 +++
 batch_test.go                                 | 182 ++++
 cmd/migrate/internal/commands/create.go       |  91 ++
 cmd/migrate/internal/commands/describe.go     |  50 +
 cmd/migrate/internal/commands/drift.go        |  67 ++
 cmd/migrate/internal/commands/force_write.go  |  46 +
 cmd/migrate/internal/commands/state.go        | 131 +++
 cmd/migrate/internal/commands/undo.go         |  46 +
 cmd/migrate/internal/commands/up.go           |  54 ++
 cmd/migrate/internal/database/dial.go         |  10 +
 cmd/migrate/internal/database/runner.go       |  25 +
 cmd/migrate/internal/flags/database_url.go    |  48 +
 cmd/migrate/internal/flags/migrations_dir.go  |  12 +
 cmd/migrate/internal/flags/no_color.go        |  26 +
 cmd/migrate/internal/flags/util.go            |  17 +
 cmd/migrate/internal/logging/logger.go        |  20 +
 cmd/migrate/main.go                           |  35 +
 config.go                                     |   7 +-
 db.go                                         | 126 +--
 db_transaction.go                             | 120 +++
 db_transaction_test.go                        | 203 +++++
 db_wrapper.go                                 |  97 ++
 describe.go                                   |  76 ++
 describe_columns.go                           | 134 +++
 describe_constraints.go                       |  89 ++
 describe_dependencies.go                      |  91 ++
 describe_enums.go                             |  40 +
 describe_extensions.go                        |  35 +
 describe_functions.go                         |  55 ++
 describe_indexes.go                           | 116 +++
 describe_sequences.go                         |  61 ++
 describe_tables.go                            |  95 ++
 describe_test.go                              |  29 +
 describe_triggers.go                          |  43 +
 describe_views.go                             |  37 +
 dial.go                                       |  34 +
 drift.go                                      | 388 ++++++++
 drift_columns.go                              |  87 ++
 drift_constraints.go                          |  35 +
 drift_enums.go                                | 263 ++++++
 drift_enums_test.go                           |  76 ++
 drift_extensions.go                           |  35 +
 drift_functions.go                            |  47 +
 drift_indexes.go                              |  47 +
 drift_sequences.go                            |  92 ++
 drift_tables.go                               |  59 ++
 drift_test.go                                 | 858 ++++++++++++++++++
 drift_triggers.go                             |  33 +
 drift_views.go                                |  53 ++
 drift_views_test.go                           |  40 +
 go.mod                                        |  56 +-
 go.sum                                        | 617 ++++++-------
 initializer.go                                |  25 +-
 initializer_options.go                        |  19 +
 locker.go                                     |  63 ++
 locker_test.go                                |  82 ++
 migration.go                                  |  78 --
 migration_reader.go                           | 115 +++
 migration_reader_embed.go                     |   7 +
 migration_reader_filesystem.go                | 105 +++
 migration_reader_test.go                      |  77 ++
 migration_runner.go                           | 559 ++++++++++++
 migration_runner_test.go                      | 314 +++++++
 options.go                                    |  26 -
 paging.go                                     |  59 --
 query.go                                      | 144 +++
 query_test.go                                 | 142 +++
 rows.go                                       |  13 +
 rows_scanner.go                               |  37 +
 rows_slice_scanner.go                         | 111 +++
 rows_slice_scanner_test.go                    | 137 +++
 rows_value_scanner.go                         |  41 +
 rows_value_scanner_test.go                    |  19 +
 testdata/golden/TestDescribeSchema.golden     | 432 +++++++++
 .../cic_in_down_migration/1_first/down.sql    |   2 +
 .../cic_in_down_migration/1_first/up.sql      |   6 +
 .../cic_in_down_migration/2_second/down.sql   |   3 +
 .../cic_in_down_migration/2_second/up.sql     |   3 +
 .../cic_in_down_migration/3_third/down.sql    |   2 +
 .../cic_in_down_migration/3_third/up.sql      |   2 +
 .../migrations/cic_pattern/1_first/down.sql   |   2 +
 .../migrations/cic_pattern/1_first/up.sql     |   7 +
 .../migrations/cic_pattern/2_second/down.sql  |   2 +
 .../migrations/cic_pattern/2_second/up.sql    |   8 +
 .../migrations/cic_pattern/3_third/down.sql   |   2 +
 .../migrations/cic_pattern/3_third/up.sql     |   8 +
 .../migrations/cic_pattern/4_fourth/down.sql  |   2 +
 .../migrations/cic_pattern/4_fourth/up.sql    |   2 +
 .../1_first/down.sql                          |   2 +
 .../1_first/up.sql                            |   7 +
 .../2_second/down.sql                         |   2 +
 .../2_second/up.sql                           |   8 +
 .../3_third/down.sql                          |   2 +
 .../3_third/up.sql                            |   8 +
 .../4_fourth/down.sql                         |   6 +
 .../4_fourth/up.sql                           |   9 +
 .../duplicate_identifiers/1_first/down.sql    |   2 +
 .../duplicate_identifiers/1_first/up.sql      |   7 +
 .../duplicate_identifiers/2_second/down.sql   |   2 +
 .../duplicate_identifiers/2_second/up.sql     |   8 +
 .../duplicate_identifiers/2_third/down.sql    |   2 +
 .../duplicate_identifiers/2_third/up.sql      |   8 +
 testdata/migrations/valid/1_first/down.sql    |   2 +
 testdata/migrations/valid/1_first/up.sql      |   7 +
 testdata/migrations/valid/2_second/down.sql   |   2 +
 testdata/migrations/valid/2_second/up.sql     |   8 +
 testdata/migrations/valid/3_third/down.sql    |   2 +
 testdata/migrations/valid/3_third/up.sql      |   8 +
 testdata/schemas/describe.sql                 | 110 +++
 testing.go                                    |  69 ++
 url.go                                        |  35 +
 util.go                                       |  18 +
 117 files changed, 7729 insertions(+), 643 deletions(-)
 create mode 100644 .envrc
 create mode 100644 batch.go
 create mode 100644 batch_options.go
 create mode 100644 batch_queries.go
 create mode 100644 batch_test.go
 create mode 100644 cmd/migrate/internal/commands/create.go
 create mode 100644 cmd/migrate/internal/commands/describe.go
 create mode 100644 cmd/migrate/internal/commands/drift.go
 create mode 100644 cmd/migrate/internal/commands/force_write.go
 create mode 100644 cmd/migrate/internal/commands/state.go
 create mode 100644 cmd/migrate/internal/commands/undo.go
 create mode 100644 cmd/migrate/internal/commands/up.go
 create mode 100644 cmd/migrate/internal/database/dial.go
 create mode 100644 cmd/migrate/internal/database/runner.go
 create mode 100644 cmd/migrate/internal/flags/database_url.go
 create mode 100644 cmd/migrate/internal/flags/migrations_dir.go
 create mode 100644 cmd/migrate/internal/flags/no_color.go
 create mode 100644 cmd/migrate/internal/flags/util.go
 create mode 100644 cmd/migrate/internal/logging/logger.go
 create mode 100644 cmd/migrate/main.go
 create mode 100644 db_transaction.go
 create mode 100644 db_transaction_test.go
 create mode 100644 db_wrapper.go
 create mode 100644 describe.go
 create mode 100644 describe_columns.go
 create mode 100644 describe_constraints.go
 create mode 100644 describe_dependencies.go
 create mode 100644 describe_enums.go
 create mode 100644 describe_extensions.go
 create mode 100644 describe_functions.go
 create mode 100644 describe_indexes.go
 create mode 100644 describe_sequences.go
 create mode 100644 describe_tables.go
 create mode 100644 describe_test.go
 create mode 100644 describe_triggers.go
 create mode 100644 describe_views.go
 create mode 100644 dial.go
 create mode 100644 drift.go
 create mode 100644 drift_columns.go
 create mode 100644 drift_constraints.go
 create mode 100644 drift_enums.go
 create mode 100644 drift_enums_test.go
 create mode 100644 drift_extensions.go
 create mode 100644 drift_functions.go
 create mode 100644 drift_indexes.go
 create mode 100644 drift_sequences.go
 create mode 100644 drift_tables.go
 create mode 100644 drift_test.go
 create mode 100644 drift_triggers.go
 create mode 100644 drift_views.go
 create mode 100644 drift_views_test.go
 create mode 100644 initializer_options.go
 create mode 100644 locker.go
 create mode 100644 locker_test.go
 delete mode 100644 migration.go
 create mode 100644 migration_reader.go
 create mode 100644 migration_reader_embed.go
 create mode 100644 migration_reader_filesystem.go
 create mode 100644 migration_reader_test.go
 create mode 100644 migration_runner.go
 create mode 100644 migration_runner_test.go
 delete mode 100644 options.go
 delete mode 100644 paging.go
 create mode 100644 query.go
 create mode 100644 query_test.go
 create mode 100644 rows.go
 create mode 100644 rows_scanner.go
 create mode 100644 rows_slice_scanner.go
 create mode 100644 rows_slice_scanner_test.go
 create mode 100644 rows_value_scanner.go
 create mode 100644 rows_value_scanner_test.go
 create mode 100644 testdata/golden/TestDescribeSchema.golden
 create mode 100644 testdata/migrations/cic_in_down_migration/1_first/down.sql
 create mode 100644 testdata/migrations/cic_in_down_migration/1_first/up.sql
 create mode 100644 testdata/migrations/cic_in_down_migration/2_second/down.sql
 create mode 100644 testdata/migrations/cic_in_down_migration/2_second/up.sql
 create mode 100644 testdata/migrations/cic_in_down_migration/3_third/down.sql
 create mode 100644 testdata/migrations/cic_in_down_migration/3_third/up.sql
 create mode 100755 testdata/migrations/cic_pattern/1_first/down.sql
 create mode 100755 testdata/migrations/cic_pattern/1_first/up.sql
 create mode 100755 testdata/migrations/cic_pattern/2_second/down.sql
 create mode 100755 testdata/migrations/cic_pattern/2_second/up.sql
 create mode 100755 testdata/migrations/cic_pattern/3_third/down.sql
 create mode 100755 testdata/migrations/cic_pattern/3_third/up.sql
 create mode 100644 testdata/migrations/cic_pattern/4_fourth/down.sql
 create mode 100644 testdata/migrations/cic_pattern/4_fourth/up.sql
 create mode 100755 testdata/migrations/cic_with_additional_queries/1_first/down.sql
 create mode 100755 testdata/migrations/cic_with_additional_queries/1_first/up.sql
 create mode 100755 testdata/migrations/cic_with_additional_queries/2_second/down.sql
 create mode 100755 testdata/migrations/cic_with_additional_queries/2_second/up.sql
 create mode 100755 testdata/migrations/cic_with_additional_queries/3_third/down.sql
 create mode 100755 testdata/migrations/cic_with_additional_queries/3_third/up.sql
 create mode 100644 testdata/migrations/cic_with_additional_queries/4_fourth/down.sql
 create mode 100644 testdata/migrations/cic_with_additional_queries/4_fourth/up.sql
 create mode 100755 testdata/migrations/duplicate_identifiers/1_first/down.sql
 create mode 100755 testdata/migrations/duplicate_identifiers/1_first/up.sql
 create mode 100755 testdata/migrations/duplicate_identifiers/2_second/down.sql
 create mode 100755 testdata/migrations/duplicate_identifiers/2_second/up.sql
 create mode 100755 testdata/migrations/duplicate_identifiers/2_third/down.sql
 create mode 100755 testdata/migrations/duplicate_identifiers/2_third/up.sql
 create mode 100755 testdata/migrations/valid/1_first/down.sql
 create mode 100755 testdata/migrations/valid/1_first/up.sql
 create mode 100755 testdata/migrations/valid/2_second/down.sql
 create mode 100755 testdata/migrations/valid/2_second/up.sql
 create mode 100755 testdata/migrations/valid/3_third/down.sql
 create mode 100755 testdata/migrations/valid/3_third/up.sql
 create mode 100644 testdata/schemas/describe.sql
 create mode 100644 testing.go
 create mode 100644 url.go
 create mode 100644 util.go

diff --git a/.envrc b/.envrc
new file mode 100644
index 0000000..a3bee69
--- /dev/null
+++ b/.envrc
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+export PGHOST=localhost
+export PGPORT=5432
+export PGUSER=postgres
+export PGPASSWORD=
+export PGDATABASE=postgres
+export TEMPLATEDB=template0
diff --git a/LICENSE b/LICENSE
index d3f24ba..4677eb3 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2022 Eric Fritz
+Copyright (c) 2024 Eric Fritz
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 71be27d..c4e05c7 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Postgres utilities for use with nacelle.
 
 ### Usage
 
-This library creates a [sqlx](https://github.com/jmoiron/sqlx) connection wrapped in a nacelle [logger](https://nacelle.dev/docs/core/log). The supplied initializer adds this connection into the nacelle [service container](https://nacelle.dev/docs/core/service) under the key `db`. The initializer will block until a ping succeeds.
+This library creates a Postgres connection wrapped in a nacelle [logger](https://nacelle.dev/docs/core/log). The supplied initializer adds this connection into the nacelle [service container](https://nacelle.dev/docs/core/service) under the key `db`. The initializer will block until a ping succeeds.
 
 ```go
 func setup(processes nacelle.ProcessContainer, services nacelle.ServiceContainer) error {
@@ -21,28 +21,6 @@ func setup(processes nacelle.ProcessContainer, services nacelle.ServiceContainer
 }
 ```
 
-This library uses [golang migrate](https://github.com/golang-migrate/migrate) to optionally run migrations on application startup. To configure migrations, supply a [source driver](https://github.com/golang-migrate/migrate#migration-sources) to the initializer, as follows.
-
-```go
-import (
-    _ "github.com/golang-migrate/migrate/v4/source/file"
-    "github.com/golang-migrate/migrate/v4/source"
-)
-
-func setup(processes nacelle.ProcessContainer, services nacelle.ServiceContainer) error {
-    migrationSourceDriver, err := source.Open("file:///migrations")
-    if err != nil {
-        return err
-    }
-
-    processes.RegisterInitializer(pgutil.NewInitializer(
-        pgutil.WithMigrationSourceDriver(migrationSourceDriver)
-    ))
-
-    // ...
-}
-```
-
 ### Configuration
 
 The default service behavior can be configured by the following environment variables.
@@ -51,6 +29,3 @@ The default service behavior can be configured by the following environment vari
 | ------------------------------- | -------- | ----------------- | ---------------------------------------------------------------------------------------------------- |
 | DATABASE_URL                    | yes      |                   | The connection string of the remote database.                                                         |
 | LOG_SQL_QUERIES                 |          | false             | Whether or not to log parameterized SQL queries.                                                      |
-| MIGRATIONS_TABLE                |          | schema_migrations | The name of the migrations table.                                                                     |
-| MIGRATIONS_SCHEMA_NAME          |          | default           | The name of the schema used during migrations.                                                        |
-| FAIL_ON_NEWER_MIGRATION_VERSION |          | false             | If true, fail startup when the database migration version is newer than the known set of migrations.  |
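For context (not part of the patch): after the initializer runs, a consumer pulls the connection back out of the service container under the `db` key documented in the README hunk above. A minimal sketch, assuming nacelle's tag-based service injection; the process type and field name are hypothetical:

```go
// Sketch (not part of the patch): consuming the connection registered
// under the "db" key. Assumes nacelle injects services via struct tags;
// apiProcess itself is illustrative.
type apiProcess struct {
	DB pgutil.DB `service:"db"`
}
```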
diff --git a/batch.go b/batch.go
new file mode 100644
index 0000000..4fb10f0
--- /dev/null
+++ b/batch.go
@@ -0,0 +1,71 @@
+package pgutil
+
+import (
+    "context"
+    "fmt"
+)
+
+type BatchInserter struct {
+    db               DB
+    numColumns       int
+    maxBatchSize     int
+    maxCapacity      int
+    queryBuilder     *batchQueryBuilder
+    returningScanner ScanFunc
+    values           []any
+}
+
+const maxNumPostgresParameters = 65535
+
+func NewBatchInserter(db DB, tableName string, columnNames []string, configs ...BatchInserterConfigFunc) *BatchInserter {
+    var (
+        options          = getBatchInserterOptions(configs)
+        numColumns       = len(columnNames)
+        maxBatchSize     = int(maxNumPostgresParameters/numColumns) * numColumns
+        maxCapacity      = maxBatchSize + numColumns
+        queryBuilder     = newBatchQueryBuilder(tableName, columnNames, options.onConflictClause, options.returningClause)
+        returningScanner = options.returningScanner
+    )
+
+    return &BatchInserter{
+        db:               db,
+        numColumns:       numColumns,
+        maxBatchSize:     maxBatchSize,
+        maxCapacity:      maxCapacity,
+        queryBuilder:     queryBuilder,
+        returningScanner: returningScanner,
+        values:           make([]any, 0, maxCapacity),
+    }
+}
+
+func (i *BatchInserter) Insert(ctx context.Context, values ...any) error {
+    if len(values) != i.numColumns {
+        return fmt.Errorf("received %d values for %d columns", len(values), i.numColumns)
+    }
+
+    i.values = append(i.values, values...)
+
+    if len(i.values) >= i.maxBatchSize {
+        return i.Flush(ctx)
+    }
+
+    return nil
+}
+
+func (i *BatchInserter) Flush(ctx context.Context) error {
+    if len(i.values) == 0 {
+        return nil
+    }
+
+    n := i.maxBatchSize
+    if len(i.values) < i.maxBatchSize {
+        n = len(i.values)
+    }
+
+    batch := i.values[:n]
+    i.values = append(make([]any, 0, i.maxCapacity), i.values[n:]...)
+
+    batchSize := len(batch)
+    query := i.queryBuilder.build(batchSize)
+    return NewRowScanner(i.returningScanner)(i.db.Query(ctx, RawQuery(query, batch...)))
+}
diff --git a/batch_options.go b/batch_options.go
new file mode 100644
index 0000000..12bacde
--- /dev/null
+++ b/batch_options.go
@@ -0,0 +1,38 @@
+package pgutil
+
+import (
+    "fmt"
+    "strings"
+)
+
+type (
+    batchInserterOptions struct {
+        onConflictClause string
+        returningClause  string
+        returningScanner ScanFunc
+    }
+
+    BatchInserterConfigFunc func(*batchInserterOptions)
+)
+
+func getBatchInserterOptions(configs []BatchInserterConfigFunc) *batchInserterOptions {
+    options := &batchInserterOptions{}
+    for _, f := range configs {
+        f(options)
+    }
+
+    return options
+}
+
+func WithBatchInserterOnConflict(clause string) BatchInserterConfigFunc {
+    return func(o *batchInserterOptions) {
+        o.onConflictClause = fmt.Sprintf("ON CONFLICT %s", clause)
+    }
+}
+
+func WithBatchInserterReturn(columns []string, scanner ScanFunc) BatchInserterConfigFunc {
+    return func(o *batchInserterOptions) {
+        o.returningClause = fmt.Sprintf("RETURNING %s", strings.Join(quoteColumnNames(columns), ", "))
+        o.returningScanner = scanner
+    }
+}
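Taken together, batch.go and batch_options.go give the following call pattern, which mirrors the tests later in this patch. A usage sketch (not part of the patch); the `users` table and its columns are hypothetical:

```go
// Sketch (not part of the patch): typical BatchInserter usage.
inserter := pgutil.NewBatchInserter(db, "users", []string{"name", "email"},
	pgutil.WithBatchInserterOnConflict("DO NOTHING"))

for _, u := range users {
	// Insert buffers values and flushes automatically once a full batch
	// (near the 65535-parameter Postgres limit) has accumulated.
	if err := inserter.Insert(ctx, u.Name, u.Email); err != nil {
		return err
	}
}

// Flush the final partial batch explicitly.
if err := inserter.Flush(ctx); err != nil {
	return err
}
```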
diff --git a/batch_queries.go b/batch_queries.go
new file mode 100644
index 0000000..4fc914d
--- /dev/null
+++ b/batch_queries.go
@@ -0,0 +1,102 @@
+package pgutil
+
+import (
+    "fmt"
+    "strings"
+    "sync"
+)
+
+var (
+    placeholders           []string
+    placeholdersCache      = map[int]string{}
+    placeholdersCacheMutex sync.Mutex
+)
+
+func init() {
+    placeholders = make([]string, 0, maxNumPostgresParameters)
+
+    for i := 0; i < maxNumPostgresParameters; i++ {
+        placeholders = append(placeholders, fmt.Sprintf("$%05d", i+1))
+    }
+}
+
+type batchQueryBuilder struct {
+    numColumns   int
+    queryPrefix  string
+    querySuffix  string
+    placeholders string
+}
+
+func newBatchQueryBuilder(tableName string, columnNames []string, onConflictClause, returningClause string) *batchQueryBuilder {
+    var (
+        numColumns  = len(columnNames)
+        queryPrefix = fmt.Sprintf("INSERT INTO %q (%s) VALUES", tableName, strings.Join(quoteColumnNames(columnNames), ", "))
+        querySuffix = fmt.Sprintf("%s %s", onConflictClause, returningClause)
+        all         = makeBatchPlaceholdersString(numColumns)
+    )
+
+    return &batchQueryBuilder{
+        numColumns:   numColumns,
+        queryPrefix:  queryPrefix,
+        querySuffix:  querySuffix,
+        placeholders: all,
+    }
+}
+
+func (b *batchQueryBuilder) build(batchSize int) string {
+    return fmt.Sprintf("%s %s %s", b.queryPrefix, b.placeholders[:placeholdersLen(b.numColumns, batchSize)], b.querySuffix)
+}
+
+func makeBatchPlaceholdersString(numColumns int) string {
+    placeholdersCacheMutex.Lock()
+    defer placeholdersCacheMutex.Unlock()
+    if placeholders, ok := placeholdersCache[numColumns]; ok {
+        return placeholders
+    }
+
+    var sb strings.Builder
+    sb.WriteString("(")
+    sb.WriteString(placeholders[0])
+    for i := 1; i < maxNumPostgresParameters; i++ {
+        if i%numColumns == 0 {
+            sb.WriteString("),(")
+        } else {
+            sb.WriteString(",")
+        }
+
+        sb.WriteString(placeholders[i])
+    }
+    sb.WriteString(")")
+
+    placeholders := sb.String()
+    placeholdersCache[numColumns] = placeholders
+    return placeholders
+}
+
+func placeholdersLen(numColumns, batchSize int) int {
+    var (
+        numRows        = batchSize / numColumns
+        placeholderLen = 6                                           // e.g., `$00123`
+        rowLen         = sequenceLen(numColumns, placeholderLen) + 2 // e.g., `($00123,$00124,...)`
+        totalLen       = sequenceLen(numRows, rowLen)
+    )
+
+    return totalLen
+}
+
+func sequenceLen(num, len int) int {
+    return num*(len+1) - 1
+}
+
+func quoteColumnNames(names []string) []string {
+    quoted := make([]string, len(names))
+    for i, name := range names {
+        quoted[i] = quoteColumnName(name)
+    }
+
+    return quoted
+}
+
+func quoteColumnName(name string) string {
+    return fmt.Sprintf("%q", name)
+}
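The length arithmetic in `placeholdersLen` can be checked by hand. A worked example (not part of the patch; `placeholdersLen` is unexported, so this only runs inside the package) for two columns and a batch of four values:

```go
// sequenceLen(2, 6)  = 2*(6+1) - 1  = 13  // "$00001,$00002"
// rowLen             = 13 + 2      = 15  // "($00001,$00002)"
// sequenceLen(2, 15) = 2*(15+1) - 1 = 31  // "($00001,$00002),($00003,$00004)"
fmt.Println(placeholdersLen(2, 4))                  // 31
fmt.Println(len("($00001,$00002),($00003,$00004)")) // 31
```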
WithBatchInserterOnConflict("DO NOTHING")) + runBatchInserter(t, inserter, append(rowValues, createBatchRowValues(t, numRows/4, payloads)...)) + assertBatchInsertedValues(t, db, "w", expectedValues) + }) + + t.Run("update", func(t *testing.T) { + var ( + db = NewTestDB(t) + numRows = 100000 + numPayloads = 100 + columns = []string{"w", "x", "y", "z", "q", "payload"} + ) + + setupTestBatchTable(t, db) + payloads := createBatchPayloads(t, numPayloads) + initialRowValues := createBatchRowValues(t, numRows/4, payloads) + rowValues := createBatchRowValues(t, numRows, payloads) + inserter := NewBatchInserter(db, "test", columns) + runBatchInserter(t, inserter, initialRowValues) + + expectedValues := make([]any, 0, numRows) + for i, values := range rowValues { + if i < len(initialRowValues) { + // updated + expectedValues = append(expectedValues, int64(0)) + } else { + // not updated + expectedValues = append(expectedValues, values[1]) + } + } + + // Insert duplicates for update and assert updated values fo column "x" + inserter = NewBatchInserter(db, "test", columns, WithBatchInserterOnConflict("(w) DO UPDATE SET x = 0, y = 0, z = 0, q = 0")) + runBatchInserter(t, inserter, rowValues) + assertBatchInsertedValues(t, db, "x", expectedValues) + }) +} + +func TestBatchInserterWithReturning(t *testing.T) { + var ( + db = NewTestDB(t) + numRows = 100000 + numPayloads = 100 + columns = []string{"w", "x", "y", "z", "q", "payload"} + collector = NewCollector(NewAnyValueScanner[int]()) + ) + + setupTestBatchTable(t, db) + payloads := createBatchPayloads(t, numPayloads) + rowValues := createBatchRowValues(t, numRows, payloads) + + expectedValues := make([]int, 0, numRows) + for i := range rowValues { + expectedValues = append(expectedValues, i+1) + } + + // Insert rows and assert scanned serial ids + inserter := NewBatchInserter(db, "test", columns, WithBatchInserterReturn([]string{"id"}, collector.Scanner())) + runBatchInserter(t, inserter, rowValues) + assert.Equal(t, expectedValues, collector.Slice()) +} + +// +// + +func setupTestBatchTable(t testing.TB, db DB) { + t.Helper() + ctx := context.Background() + + require.NoError(t, db.Exec(ctx, RawQuery(` + CREATE TABLE test ( + id SERIAL, + w integer NOT NULL UNIQUE, + x integer NOT NULL, + y integer NOT NULL, + z integer NOT NULL, + q integer NOT NULL, + payload text + ) + `))) +} + +func runBatchInserter(t testing.TB, inserter *BatchInserter, rowValues [][]any) { + t.Helper() + ctx := context.Background() + + for _, values := range rowValues { + require.NoError(t, inserter.Insert(ctx, values...)) + } + + require.NoError(t, inserter.Flush(ctx)) +} + +func assertBatchInsertedValues(t testing.TB, db DB, columnName string, expectedValues []any) { + t.Helper() + ctx := context.Background() + + values, err := ScanAnys(db.Query(ctx, Query("SELECT {:col} FROM test", Args{"col": Quote(columnName)}))) + require.NoError(t, err) + assert.Equal(t, expectedValues, values) +} + +func createBatchPayloads(t testing.TB, n int) []string { + payloads := make([]string, 0, n) + for i := 0; i < n; i++ { + payload, err := randomHexString(128) + require.NoError(t, err) + + payloads = append(payloads, fmt.Sprintf("payload-%s", payload)) + } + + return payloads +} + +func createBatchRowValues(t testing.TB, n int, payloads []string) [][]any { + values := make([][]any, 0, n) + for i := 0; i < n; i++ { + values = append(values, []any{ + int64(i*2 + 1), // w + int64(i*2 + 2), // z + int64(i*2 + 3), // y + int64(i*2 + 4), // z + int64(i*2 + 5), // q + payloads[i%len(payloads)], // 
payload + }) + } + + return values +} diff --git a/cmd/migrate/internal/commands/create.go b/cmd/migrate/internal/commands/create.go new file mode 100644 index 0000000..e2c4794 --- /dev/null +++ b/cmd/migrate/internal/commands/create.go @@ -0,0 +1,91 @@ +package commands + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/go-nacelle/log/v2" + "github.com/go-nacelle/pgutil" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/flags" + "github.com/spf13/cobra" +) + +func CreateCommand(logger log.Logger) *cobra.Command { + var ( + migrationsDirectory string + ) + + createCmd := &cobra.Command{ + Use: "create [flags] 'migration name'", + Short: "Create a new schema migration", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return create(migrationsDirectory, args[0]) + }, + } + + flags.RegisterMigrationsDirectoryFlag(createCmd, &migrationsDirectory) + return createCmd +} + +func create(migrationsDirectory string, name string) error { + if err := ensureMigrationDirectoryExists(migrationsDirectory); err != nil { + return err + } + + definitions, err := pgutil.ReadMigrations(pgutil.NewFilesystemMigrationReader(migrationsDirectory)) + if err != nil { + return err + } + + var lastID int + if len(definitions) > 0 { + lastID = definitions[len(definitions)-1].ID + } + + dirPath := filepath.Join(migrationsDirectory, fmt.Sprintf("%d_%s", lastID+1, canonicalize(name))) + upPath := filepath.Join(dirPath, "up.sql") + downPath := filepath.Join(dirPath, "down.sql") + + if err := os.MkdirAll(dirPath, os.ModePerm); err != nil { + return err + } + if err := os.WriteFile(upPath, nil, os.ModePerm); err != nil { + return err + } + if err := os.WriteFile(downPath, nil, os.ModePerm); err != nil { + return err + } + + return nil +} + +func ensureMigrationDirectoryExists(migrationDirectory string) error { + stat, err := os.Stat(migrationDirectory) + if err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(migrationDirectory, os.ModePerm); err != nil { + return err + } + + return nil + } + + return err + } + + if !stat.IsDir() { + return fmt.Errorf("supplied migration directory is not a directory") + } + + return nil +} + +var nonNamePattern = regexp.MustCompile(`[^a-z0-9_]+`) + +func canonicalize(name string) string { + return strings.ToLower(nonNamePattern.ReplaceAllString(name, "_")) +} diff --git a/cmd/migrate/internal/commands/describe.go b/cmd/migrate/internal/commands/describe.go new file mode 100644 index 0000000..7d97120 --- /dev/null +++ b/cmd/migrate/internal/commands/describe.go @@ -0,0 +1,50 @@ +package commands + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/go-nacelle/log/v2" + "github.com/go-nacelle/pgutil" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/database" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/flags" + "github.com/spf13/cobra" +) + +func DescribeCommand(logger log.Logger) *cobra.Command { + var ( + databaseURL string + ) + + describeCmd := &cobra.Command{ + Use: "describe", + Short: "Describe the current database schema", + RunE: func(cmd *cobra.Command, args []string) error { + return describe(databaseURL, logger) + }, + } + + flags.RegisterDatabaseURLFlag(describeCmd, &databaseURL) + return describeCmd +} + +func describe(databaseURL string, logger log.Logger) error { + db, err := database.Dial(databaseURL, logger) + if err != nil { + return err + } + + description, err := pgutil.DescribeSchema(context.Background(), db) + if err != nil { + return err + } + + serialized, err := 
diff --git a/cmd/migrate/internal/commands/describe.go b/cmd/migrate/internal/commands/describe.go
new file mode 100644
index 0000000..7d97120
--- /dev/null
+++ b/cmd/migrate/internal/commands/describe.go
@@ -0,0 +1,50 @@
+package commands
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+
+    "github.com/go-nacelle/log/v2"
+    "github.com/go-nacelle/pgutil"
+    "github.com/go-nacelle/pgutil/cmd/migrate/internal/database"
+    "github.com/go-nacelle/pgutil/cmd/migrate/internal/flags"
+    "github.com/spf13/cobra"
+)
+
+func DescribeCommand(logger log.Logger) *cobra.Command {
+    var (
+        databaseURL string
+    )
+
+    describeCmd := &cobra.Command{
+        Use:   "describe",
+        Short: "Describe the current database schema",
+        RunE: func(cmd *cobra.Command, args []string) error {
+            return describe(databaseURL, logger)
+        },
+    }
+
+    flags.RegisterDatabaseURLFlag(describeCmd, &databaseURL)
+    return describeCmd
+}
+
+func describe(databaseURL string, logger log.Logger) error {
+    db, err := database.Dial(databaseURL, logger)
+    if err != nil {
+        return err
+    }
+
+    description, err := pgutil.DescribeSchema(context.Background(), db)
+    if err != nil {
+        return err
+    }
+
+    serialized, err := json.Marshal(description)
+    if err != nil {
+        return err
+    }
+
+    fmt.Printf("%s\n", serialized)
+    return nil
+}
diff --git a/cmd/migrate/internal/commands/drift.go b/cmd/migrate/internal/commands/drift.go
new file mode 100644
index 0000000..ee2082d
--- /dev/null
+++ b/cmd/migrate/internal/commands/drift.go
@@ -0,0 +1,67 @@
+package commands
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "os"
+
+    "github.com/go-nacelle/log/v2"
+    "github.com/go-nacelle/pgutil"
+    "github.com/go-nacelle/pgutil/cmd/migrate/internal/database"
+    "github.com/go-nacelle/pgutil/cmd/migrate/internal/flags"
+    "github.com/spf13/cobra"
+)
+
+func DriftCommand(logger log.Logger) *cobra.Command {
+    var (
+        databaseURL string
+    )
+
+    driftCmd := &cobra.Command{
+        Use:   "drift 'description.json'",
+        Short: "Compare the current database schema against the expected schema",
+        Args:  cobra.ExactArgs(1),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            return drift(databaseURL, logger, args[0])
+        },
+    }
+
+    flags.RegisterDatabaseURLFlag(driftCmd, &databaseURL)
+    return driftCmd
+}
+
+func drift(databaseURL string, logger log.Logger, filename string) error {
+    db, err := database.Dial(databaseURL, logger)
+    if err != nil {
+        return err
+    }
+
+    description, err := pgutil.DescribeSchema(context.Background(), db)
+    if err != nil {
+        return err
+    }
+
+    b, err := os.ReadFile(filename)
+    if err != nil {
+        return err
+    }
+
+    var expected pgutil.SchemaDescription
+    if err := json.Unmarshal(b, &expected); err != nil {
+        return err
+    }
+
+    statements := pgutil.Compare(expected, description)
+
+    if len(statements) == 0 {
+        fmt.Printf("No drift detected\n")
+        return nil
+    }
+
+    for _, d := range statements {
+        fmt.Printf("%s\n\n", d)
+    }
+
+    return nil
+}
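The describe and drift commands round-trip through the same library API, so the same comparison can be done programmatically. A sketch (not part of the patch), assuming a `description.json` previously produced by `migrate describe`:

```go
// Sketch (not part of the patch): diff a live database against a saved
// schema snapshot using the primitives the CLI uses above.
expectedJSON, err := os.ReadFile("description.json")
if err != nil {
	return err
}

var expected pgutil.SchemaDescription
if err := json.Unmarshal(expectedJSON, &expected); err != nil {
	return err
}

actual, err := pgutil.DescribeSchema(ctx, db)
if err != nil {
	return err
}

// Compare returns SQL statements that would reconcile the live schema.
for _, stmt := range pgutil.Compare(expected, actual) {
	fmt.Println(stmt)
}
```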
"context" + "fmt" + "strings" + + "github.com/fatih/color" + "github.com/go-nacelle/log/v2" + "github.com/go-nacelle/pgutil" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/database" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/flags" + "github.com/spf13/cobra" +) + +func StatCommand(logger log.Logger) *cobra.Command { + var ( + databaseURL string + migrationsDirectory string + ) + + stateCmd := &cobra.Command{ + Use: "state", + Short: "Display the current state of the database schema", + RunE: func(cmd *cobra.Command, args []string) error { + return state(databaseURL, migrationsDirectory, logger) + }, + } + + flags.RegisterDatabaseURLFlag(stateCmd, &databaseURL) + flags.RegisterMigrationsDirectoryFlag(stateCmd, &migrationsDirectory) + flags.RegisterNoColorFlag(stateCmd) + return stateCmd +} + +func state(databaseURL, migrationsDirectory string, logger log.Logger) error { + runner, err := database.CreateRunner(databaseURL, migrationsDirectory, logger) + if err != nil { + return err + } + + definitions := runner.Definitions() + + logs, err := runner.MigrationLogs(context.Background()) + if err != nil { + return err + } + logMap := map[int]pgutil.MigrationLog{} + for _, log := range logs { + logMap[log.MigrationID] = log + } + + maxDefinitionLen := 0 + for _, definition := range definitions { + if len(definition.Name) > maxDefinitionLen { + maxDefinitionLen = len(definition.Name) + } + } + + type migrationError struct { + definition pgutil.Definition + errorMessage string + } + errorMessages := []migrationError{} + + for _, definition := range definitions { + log, exists := logMap[definition.ID] + color, statusEmoji, statusText := definitionStatus(log, exists) + + color.Printf( + "%s %04d: %s\t\t%s\n", + statusEmoji, + definition.ID, + definition.Name+strings.Repeat(" ", maxDefinitionLen-len(definition.Name)), + statusText, + ) + + if exists && log.ErrorMessage != nil { + errorMessages = append(errorMessages, migrationError{ + definition: definition, + errorMessage: *log.ErrorMessage, + }) + } + } + + if len(errorMessages) > 0 { + fmt.Println("\nErrors:") + + for _, message := range errorMessages { + fmt.Printf(" %04d: %s\n", message.definition.ID, message.errorMessage) + } + } + + return nil +} + +const ( + emojiApplied = "✅" + emojiError = "❌" + emojiUnknown = "❓" + emojiReverse = "↩️" + emojiNotApplied = " " +) + +func definitionStatus(log pgutil.MigrationLog, exists bool) (_ *color.Color, statusEmoji string, statusText string) { + if !exists { + return color.New(color.FgCyan), emojiNotApplied, "Not applied" + } + + if log.Success != nil { + if *log.Success { + if !log.Reverse { + return color.New(color.FgGreen), emojiApplied, "Successfully applied" + } else { + return color.New(color.FgYellow), emojiReverse, "Successfully un-apply" + } + } else { + if !log.Reverse { + return color.New(color.FgRed), emojiError, "Failed most recent apply" + } else { + return color.New(color.FgRed), emojiError, "Failed most recent un-apply" + } + } + } + + if !log.Reverse { + return color.New(color.FgMagenta), emojiUnknown, "Attempted apply (unknown status)" + } else { + return color.New(color.FgMagenta), emojiUnknown, "Attempted un-apply (unknown status)" + } +} diff --git a/cmd/migrate/internal/commands/undo.go b/cmd/migrate/internal/commands/undo.go new file mode 100644 index 0000000..f6b30bb --- /dev/null +++ b/cmd/migrate/internal/commands/undo.go @@ -0,0 +1,46 @@ +package commands + +import ( + "context" + "fmt" + "strconv" + + "github.com/go-nacelle/log/v2" + 
"github.com/go-nacelle/pgutil/cmd/migrate/internal/database" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/flags" + "github.com/spf13/cobra" +) + +func UndoCommand(logger log.Logger) *cobra.Command { + var ( + databaseURL string + migrationsDirectory string + ) + + undoCmd := &cobra.Command{ + Use: "undo ", + Short: "Undo migrations up to and including the specified migration ID", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + migrationID, err := strconv.Atoi(args[0]) + if err != nil { + return fmt.Errorf("invalid migration ID: %v", err) + } + + return undo(databaseURL, migrationsDirectory, logger, migrationID) + }, + } + + flags.RegisterDatabaseURLFlag(undoCmd, &databaseURL) + flags.RegisterMigrationsDirectoryFlag(undoCmd, &migrationsDirectory) + return undoCmd +} + +func undo(databaseURL string, migrationsDirectory string, logger log.Logger, migrationID int) error { + runner, err := database.CreateRunner(databaseURL, migrationsDirectory, logger) + if err != nil { + return err + } + + return runner.Undo(context.Background(), migrationID) +} diff --git a/cmd/migrate/internal/commands/up.go b/cmd/migrate/internal/commands/up.go new file mode 100644 index 0000000..05434ae --- /dev/null +++ b/cmd/migrate/internal/commands/up.go @@ -0,0 +1,54 @@ +package commands + +import ( + "context" + "fmt" + "strconv" + + "github.com/go-nacelle/log/v2" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/database" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/flags" + "github.com/spf13/cobra" +) + +func UpCommand(logger log.Logger) *cobra.Command { + var ( + databaseURL string + migrationsDirectory string + ) + + upCmd := &cobra.Command{ + Use: "up [migration_id]", + Short: "Run migrations up to and including the specified migration ID", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var migrationID *int + if len(args) != 0 { + val, err := strconv.Atoi(args[0]) + if err != nil { + return fmt.Errorf("invalid migration ID: %v", err) + } + migrationID = &val + } + + return up(databaseURL, migrationsDirectory, logger, migrationID) + }, + } + + flags.RegisterDatabaseURLFlag(upCmd, &databaseURL) + flags.RegisterMigrationsDirectoryFlag(upCmd, &migrationsDirectory) + return upCmd +} + +func up(databaseURL, migrationsDirectory string, logger log.Logger, migrationID *int) error { + runner, err := database.CreateRunner(databaseURL, migrationsDirectory, logger) + if err != nil { + return err + } + + if migrationID == nil { + return runner.ApplyAll(context.Background()) + } + + return runner.Apply(context.Background(), *migrationID) +} diff --git a/cmd/migrate/internal/database/dial.go b/cmd/migrate/internal/database/dial.go new file mode 100644 index 0000000..e5f6e98 --- /dev/null +++ b/cmd/migrate/internal/database/dial.go @@ -0,0 +1,10 @@ +package database + +import ( + "github.com/go-nacelle/log/v2" + "github.com/go-nacelle/pgutil" +) + +func Dial(databaseURL string, logger log.Logger) (pgutil.DB, error) { + return pgutil.Dial(databaseURL, logger) +} diff --git a/cmd/migrate/internal/database/runner.go b/cmd/migrate/internal/database/runner.go new file mode 100644 index 0000000..0639d23 --- /dev/null +++ b/cmd/migrate/internal/database/runner.go @@ -0,0 +1,25 @@ +package database + +import ( + "github.com/go-nacelle/log/v2" + "github.com/go-nacelle/pgutil" +) + +func CreateRunner(databaseURL string, migrationDirectory string, logger log.Logger) (*pgutil.Runner, error) { + if migrationDirectory == "" { + panic("migration 
diff --git a/cmd/migrate/internal/database/runner.go b/cmd/migrate/internal/database/runner.go
new file mode 100644
index 0000000..0639d23
--- /dev/null
+++ b/cmd/migrate/internal/database/runner.go
@@ -0,0 +1,25 @@
+package database
+
+import (
+    "github.com/go-nacelle/log/v2"
+    "github.com/go-nacelle/pgutil"
+)
+
+func CreateRunner(databaseURL string, migrationDirectory string, logger log.Logger) (*pgutil.Runner, error) {
+    if migrationDirectory == "" {
+        panic("migration directory is not set by the calling command")
+    }
+
+    db, err := Dial(databaseURL, logger)
+    if err != nil {
+        return nil, err
+    }
+
+    reader := pgutil.NewFilesystemMigrationReader(migrationDirectory)
+    runner, err := pgutil.NewMigrationRunner(db, reader, logger)
+    if err != nil {
+        return nil, err
+    }
+
+    return runner, nil
+}
diff --git a/cmd/migrate/internal/flags/database_url.go b/cmd/migrate/internal/flags/database_url.go
new file mode 100644
index 0000000..bae21d7
--- /dev/null
+++ b/cmd/migrate/internal/flags/database_url.go
@@ -0,0 +1,48 @@
+package flags
+
+import (
+    "fmt"
+    "net/url"
+
+    "github.com/go-nacelle/pgutil"
+    "github.com/spf13/cobra"
+)
+
+func RegisterDatabaseURLFlag(cmd *cobra.Command, databaseURL *string) {
+    defaultURL := pgutil.BuildDatabaseURL()
+
+    masked, err := maskDatabasePassword(defaultURL)
+    if err != nil {
+        panic(err)
+    }
+
+    cmd.PersistentFlags().StringVarP(
+        databaseURL,
+        "url", "u",
+        "",
+        fmt.Sprintf("The database connection URL (default %s)", masked),
+    )
+
+    registerPreRun(cmd, func(cmd *cobra.Command, args []string) error {
+        if *databaseURL == "" {
+            *databaseURL = defaultURL
+        }
+
+        return nil
+    })
+}
+
+func maskDatabasePassword(databaseURL string) (string, error) {
+    parsedURL, err := url.Parse(databaseURL)
+    if err != nil {
+        return "", fmt.Errorf("failed to parse database URL: %w", err)
+    }
+
+    if parsedURL.User != nil {
+        if _, ok := parsedURL.User.Password(); ok {
+            parsedURL.User = url.UserPassword(parsedURL.User.Username(), "xxxxx")
+        }
+    }
+
+    return parsedURL.String(), nil
+}
diff --git a/cmd/migrate/internal/flags/migrations_dir.go b/cmd/migrate/internal/flags/migrations_dir.go
new file mode 100644
index 0000000..632c952
--- /dev/null
+++ b/cmd/migrate/internal/flags/migrations_dir.go
@@ -0,0 +1,12 @@
+package flags
+
+import "github.com/spf13/cobra"
+
+func RegisterMigrationsDirectoryFlag(cmd *cobra.Command, migrationDirectory *string) {
+    cmd.PersistentFlags().StringVarP(
+        migrationDirectory,
+        "dir", "d",
+        "migrations",
+        "The directory where schema migrations are defined",
+    )
+}
diff --git a/cmd/migrate/internal/flags/no_color.go b/cmd/migrate/internal/flags/no_color.go
new file mode 100644
index 0000000..419a4bc
--- /dev/null
+++ b/cmd/migrate/internal/flags/no_color.go
@@ -0,0 +1,26 @@
+package flags
+
+import (
+    "github.com/fatih/color"
+    "github.com/spf13/cobra"
+)
+
+func RegisterNoColorFlag(cmd *cobra.Command) {
+    var noColor bool
+
+    cmd.PersistentFlags().BoolVarP(
+        &noColor,
+        "no-color",
+        "",
+        false,
+        "Disable color output",
+    )
+
+    registerPreRun(cmd, func(cmd *cobra.Command, args []string) error {
+        if noColor {
+            color.NoColor = true
+        }
+
+        return nil
+    })
+}
diff --git a/cmd/migrate/internal/flags/util.go b/cmd/migrate/internal/flags/util.go
new file mode 100644
index 0000000..4ac5b6a
--- /dev/null
+++ b/cmd/migrate/internal/flags/util.go
@@ -0,0 +1,17 @@
+package flags
+
+import "github.com/spf13/cobra"
+
+func registerPreRun(cmd *cobra.Command, f func(cmd *cobra.Command, args []string) error) {
+    previous := cmd.PersistentPreRunE
+
+    cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
+        if previous != nil {
+            if err := previous(cmd, args); err != nil {
+                return err
+            }
+        }
+
+        return f(cmd, args)
+    }
+}
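Outside the CLI, the pieces wired together by `CreateRunner` can be used directly. A sketch (not part of the patch), assuming a `databaseURL` and `logger` are already in hand:

```go
// Sketch (not part of the patch): constructing and applying migrations
// with the library API used by CreateRunner above.
db, err := pgutil.Dial(databaseURL, logger)
if err != nil {
	return err
}

reader := pgutil.NewFilesystemMigrationReader("migrations")
runner, err := pgutil.NewMigrationRunner(db, reader, logger)
if err != nil {
	return err
}

// Apply every known migration in order.
return runner.ApplyAll(context.Background())
```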
"github.com/go-nacelle/log/v2" +) + +func CreateLogger() (log.Logger, error) { + cfg := config.NewConfig(config.NewEnvSourcer("PGUTIL")) + if err := cfg.Init(); err != nil { + return nil, err + } + + c := &log.Config{} + if err := cfg.Load(c); err != nil { + return nil, err + } + + return log.InitLogger(c) +} diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go new file mode 100644 index 0000000..526fdb9 --- /dev/null +++ b/cmd/migrate/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "os" + + "github.com/go-nacelle/pgutil/cmd/migrate/internal/commands" + "github.com/go-nacelle/pgutil/cmd/migrate/internal/logging" + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: "migrate", + Short: "Manage and execute Postgres schema migrations", +} + +func init() { + logger, err := logging.CreateLogger() + if err != nil { + panic(err) + } + + rootCmd.AddCommand(commands.CreateCommand(logger)) + rootCmd.AddCommand(commands.UpCommand(logger)) + rootCmd.AddCommand(commands.UndoCommand(logger)) + rootCmd.AddCommand(commands.StatCommand(logger)) + rootCmd.AddCommand(commands.WriteMigrationLogCommand(logger)) + rootCmd.AddCommand(commands.DescribeCommand(logger)) + rootCmd.AddCommand(commands.DriftCommand(logger)) +} + +func main() { + if err := rootCmd.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/config.go b/config.go index 299162d..aed34b7 100644 --- a/config.go +++ b/config.go @@ -1,9 +1,6 @@ package pgutil type Config struct { - DatabaseURL string `env:"database_url" required:"true"` - LogSQLQueries bool `env:"log_sql_queries" default:"false"` - MigrationsTable string `env:"migrations_table"` - MigrationsSchemaName string `env:"migrations_schema_name"` - FailOnNewerMigrationVersion bool `env:"fail_on_newer_migration_version"` + DatabaseURL string `env:"database_url" required:"true"` + LogSQLQueries bool `env:"log_sql_queries" default:"false"` } diff --git a/db.go b/db.go index 628be78..8b38d1c 100644 --- a/db.go +++ b/db.go @@ -1,118 +1,62 @@ package pgutil import ( + "context" "database/sql" + "errors" "fmt" "time" - "github.com/go-nacelle/nacelle" - "github.com/jmoiron/sqlx" + "github.com/go-nacelle/nacelle/v2" ) -type ( - LoggingDB struct { - *sqlx.DB - logger nacelle.Logger - } - - LoggingTx struct { - *sqlx.Tx - logger nacelle.Logger - } -) - -const MaxPingAttempts = 15 - -func Dial(url string, logger nacelle.Logger) (*LoggingDB, error) { - db, err := sqlx.Open("postgres", url) - if err != nil { - return nil, fmt.Errorf("failed to connect to database (%s)", err) - } +type DB interface { + Query(ctx context.Context, query Q) (*sql.Rows, error) + Exec(ctx context.Context, query Q) error + WithTransaction(ctx context.Context, f func(tx DB) error) error - for attempts := 0; ; attempts++ { - err := db.Ping() - if err == nil { - break - } - - if attempts >= MaxPingAttempts { - return nil, fmt.Errorf("failed to ping database within timeout") - } - - logger.Error("Failed to ping database, will retry in 2s (%s)", err.Error()) - <-time.After(time.Second * 2) - } - - return &LoggingDB{db, logger}, nil + IsInTransaction() bool + Transact(ctx context.Context) (DB, error) + Done(err error) error } -func (db *LoggingDB) Beginx() (*LoggingTx, error) { - tx, err := db.DB.Beginx() - return &LoggingTx{tx, db.logger}, err -} - -func (db *LoggingDB) Query(query string, args ...interface{}) (*sql.Rows, error) { - start := time.Now() - rows, err := db.DB.Query(query, args...) - logQuery(db.logger, query, time.Since(start), args...) 
diff --git a/db.go b/db.go
index 628be78..8b38d1c 100644
--- a/db.go
+++ b/db.go
@@ -1,118 +1,62 @@
 package pgutil
 
 import (
+    "context"
     "database/sql"
+    "errors"
     "fmt"
     "time"
 
-    "github.com/go-nacelle/nacelle"
-    "github.com/jmoiron/sqlx"
+    "github.com/go-nacelle/nacelle/v2"
 )
 
-type (
-    LoggingDB struct {
-        *sqlx.DB
-        logger nacelle.Logger
-    }
-
-    LoggingTx struct {
-        *sqlx.Tx
-        logger nacelle.Logger
-    }
-)
-
-const MaxPingAttempts = 15
-
-func Dial(url string, logger nacelle.Logger) (*LoggingDB, error) {
-    db, err := sqlx.Open("postgres", url)
-    if err != nil {
-        return nil, fmt.Errorf("failed to connect to database (%s)", err)
-    }
+type DB interface {
+    Query(ctx context.Context, query Q) (*sql.Rows, error)
+    Exec(ctx context.Context, query Q) error
+    WithTransaction(ctx context.Context, f func(tx DB) error) error
 
-    for attempts := 0; ; attempts++ {
-        err := db.Ping()
-        if err == nil {
-            break
-        }
-
-        if attempts >= MaxPingAttempts {
-            return nil, fmt.Errorf("failed to ping database within timeout")
-        }
-
-        logger.Error("Failed to ping database, will retry in 2s (%s)", err.Error())
-        <-time.After(time.Second * 2)
-    }
-
-    return &LoggingDB{db, logger}, nil
+    IsInTransaction() bool
+    Transact(ctx context.Context) (DB, error)
+    Done(err error) error
 }
 
-func (db *LoggingDB) Beginx() (*LoggingTx, error) {
-    tx, err := db.DB.Beginx()
-    return &LoggingTx{tx, db.logger}, err
-}
-
-func (db *LoggingDB) Query(query string, args ...interface{}) (*sql.Rows, error) {
-    start := time.Now()
-    rows, err := db.DB.Query(query, args...)
-    logQuery(db.logger, query, time.Since(start), args...)
-    return rows, err
+type loggingDB struct {
+    *queryWrapper
+    db *sql.DB
 }
 
-func (db *LoggingDB) Queryx(query string, args ...interface{}) (*sqlx.Rows, error) {
-    start := time.Now()
-    rows, err := db.DB.Queryx(query, args...)
-    logQuery(db.logger, query, time.Since(start), args...)
-    return rows, err
-}
-
-func (db *LoggingDB) QueryRowx(query string, args ...interface{}) *sqlx.Row {
-    start := time.Now()
-    row := db.DB.QueryRowx(query, args...)
-    logQuery(db.logger, query, time.Since(start), args...)
-    return row
+func newLoggingDB(db *sql.DB, logger nacelle.Logger) *loggingDB {
+    return &loggingDB{
+        queryWrapper: newDBWrapper(db, logger),
+        db:           db,
+    }
 }
 
-func (db *LoggingDB) Exec(query string, args ...interface{}) (sql.Result, error) {
-    start := time.Now()
-    res, err := db.DB.Exec(query, args...)
-    logQuery(db.logger, query, time.Since(start), args...)
-    return res, err
+func (db *loggingDB) WithTransaction(ctx context.Context, f func(tx DB) error) error {
+    return withTransaction(ctx, db, f)
 }
 
-func (tx *LoggingTx) Query(query string, args ...interface{}) (*sql.Rows, error) {
-    start := time.Now()
-    rows, err := tx.Tx.Query(query, args...)
-    logQuery(tx.logger, query, time.Since(start), args...)
-    return rows, err
+func (db *loggingDB) IsInTransaction() bool {
+    return false
 }
 
-func (tx *LoggingTx) Queryx(query string, args ...interface{}) (*sqlx.Rows, error) {
+func (db *loggingDB) Transact(ctx context.Context) (DB, error) {
     start := time.Now()
-    rows, err := tx.Tx.Queryx(query, args...)
-    logQuery(tx.logger, query, time.Since(start), args...)
-    return rows, err
-}
 
-func (tx *LoggingTx) QueryRowx(query string, args ...interface{}) *sqlx.Row {
-    start := time.Now()
-    row := tx.Tx.QueryRowx(query, args...)
-    logQuery(tx.logger, query, time.Since(start), args...)
-    return row
-}
+    tx, err := db.db.BeginTx(ctx, nil)
+    if err != nil {
+        return nil, err
+    }
 
-func (tx *LoggingTx) Exec(query string, args ...interface{}) (sql.Result, error) {
-    start := time.Now()
-    res, err := tx.Tx.Exec(query, args...)
-    logQuery(tx.logger, query, time.Since(start), args...)
-    return res, err
+    return &loggingTx{
+        queryWrapper: newTxWrapper(tx, db.logger),
+        tx:           tx,
+        start:        start,
+    }, nil
 }
 
-func logQuery(logger nacelle.Logger, query string, duration time.Duration, args ...interface{}) {
-    fields := nacelle.LogFields{
-        "query":    query,
-        "args":     args,
-        "duration": duration,
-    }
+var ErrNotInTransaction = fmt.Errorf("not in a transaction")
 
-    logger.DebugWithFields(fields, "sql query executed")
+func (db *loggingDB) Done(err error) error {
+    return errors.Join(err, ErrNotInTransaction)
 }
diff --git a/db_transaction.go b/db_transaction.go
new file mode 100644
index 0000000..ce570c4
--- /dev/null
+++ b/db_transaction.go
@@ -0,0 +1,120 @@
+package pgutil
+
+import (
+    "context"
+    "database/sql"
+    "errors"
+    "fmt"
+    "time"
+
+    "github.com/go-nacelle/nacelle/v2"
+)
+
+type loggingTx struct {
+    *queryWrapper
+    tx    *sql.Tx
+    start time.Time
+}
+
+func (tx *loggingTx) WithTransaction(ctx context.Context, f func(tx DB) error) error {
+    return withTransaction(ctx, tx, f)
+}
+
+func (tx *loggingTx) IsInTransaction() bool {
+    return true
+}
+
+func (tx *loggingTx) Transact(ctx context.Context) (DB, error) {
+    return createSavepoint(ctx, tx)
+}
+
+func (tx *loggingTx) Done(err error) (combinedErr error) {
+    defer func() { logDone(tx.logger, time.Since(tx.start), combinedErr) }()
+
+    if err != nil {
+        rollbackErr := tx.tx.Rollback()
+        return errors.Join(err, rollbackErr)
+    }
+
+    return tx.tx.Commit()
+}
+
+type loggingSavepoint struct {
+    *loggingTx
+    savepointID string
+    start       time.Time
+}
+
+func createSavepoint(ctx context.Context, tx *loggingTx) (*loggingSavepoint, error) {
+    start := time.Now()
+
+    id, err := randomHexString(16)
+    if err != nil {
+        return nil, err
+    }
+    savepointID := fmt.Sprintf("sp_%s", id)
+
+    // NOTE: Must interpolate identifier here as placeholders aren't valid in this position.
+    if err := tx.Exec(ctx, queryf("SAVEPOINT %s", savepointID)); err != nil {
+        return nil, err
+    }
+
+    return &loggingSavepoint{
+        loggingTx:   tx,
+        savepointID: savepointID,
+        start:       start,
+    }, nil
+}
+
+func (tx *loggingSavepoint) WithTransaction(ctx context.Context, f func(tx DB) error) error {
+    return withTransaction(ctx, tx, f)
+}
+
+func (tx *loggingSavepoint) IsInTransaction() bool {
+    return true
+}
+
+func (tx *loggingSavepoint) Transact(ctx context.Context) (DB, error) {
+    return createSavepoint(ctx, tx.loggingTx)
+}
+
+func (tx *loggingSavepoint) Done(err error) (combinedErr error) {
+    defer func() { logDone(tx.logger, time.Since(tx.start), combinedErr) }()
+
+    if err != nil {
+        // NOTE: Must interpolate identifier here as placeholders aren't valid in this position.
+        return errors.Join(err, tx.Exec(context.Background(), queryf("ROLLBACK TO %s", tx.savepointID)))
+    }
+
+    // NOTE: Must interpolate identifier here as placeholders aren't valid in this position.
+    return tx.Exec(context.Background(), queryf("RELEASE %s", tx.savepointID))
+}
+
+var ErrPanicDuringTransaction = fmt.Errorf("encountered panic during transaction")
+
+func withTransaction(ctx context.Context, db DB, f func(tx DB) error) (err error) {
+    tx, err := db.Transact(ctx)
+    if err != nil {
+        return err
+    }
+
+    defer func() {
+        if r := recover(); r != nil {
+            err = tx.Done(ErrPanicDuringTransaction)
+            panic(r)
+        }
+
+        err = tx.Done(err)
+    }()
+
+    return f(tx)
+}
+
+func logDone(logger nacelle.Logger, duration time.Duration, err error) {
+    fields := nacelle.LogFields{
+        "err":      err,
+        "duration": duration,
+    }
+
+    logger.DebugWithFields(fields, "transaction closed")
+}
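Callers are expected to reach for `WithTransaction` rather than pairing `Transact`/`Done` by hand. A usage sketch (not part of the patch); the `accounts` table is hypothetical:

```go
// Sketch (not part of the patch): a nil return commits, a non-nil return
// (or a panic) rolls back. Nested calls become savepoints automatically.
err := db.WithTransaction(ctx, func(tx pgutil.DB) error {
	if err := tx.Exec(ctx, pgutil.RawQuery(`INSERT INTO accounts (balance) VALUES (100)`)); err != nil {
		return err
	}

	// Runs inside a savepoint; only this inner work rolls back on error.
	return tx.WithTransaction(ctx, func(nested pgutil.DB) error {
		return nested.Exec(ctx, pgutil.RawQuery(`UPDATE accounts SET balance = balance - 10`))
	})
})
```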
diff --git a/db_transaction_test.go b/db_transaction_test.go
new file mode 100644
index 0000000..f51593c
--- /dev/null
+++ b/db_transaction_test.go
@@ -0,0 +1,203 @@
+package pgutil
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "strings"
+    "testing"
+
+    "github.com/go-nacelle/log/v2"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "golang.org/x/sync/errgroup"
+)
+
+func TestTransaction(t *testing.T) {
+    db := NewTestDB(t)
+    setupTestTransactionTable(t, db)
+
+    // Add record outside of transaction, ensure it's visible
+    err := db.Exec(context.Background(), RawQuery(`INSERT INTO test (x, y) VALUES (1, 42)`))
+    require.NoError(t, err)
+    assert.Equal(t, map[int]int{1: 42}, testTableContents(t, db))
+
+    // Add record inside of a transaction
+    tx1, err := db.Transact(context.Background())
+    require.NoError(t, err)
+    err = tx1.Exec(context.Background(), RawQuery(`INSERT INTO test (x, y) VALUES (2, 43)`))
+    require.NoError(t, err)
+
+    // Add record inside of another transaction
+    tx2, err := db.Transact(context.Background())
+    require.NoError(t, err)
+    err = tx2.Exec(context.Background(), RawQuery(`INSERT INTO test (x, y) VALUES (3, 44)`))
+    require.NoError(t, err)
+
+    // Check what's visible pre-commit/rollback
+    assert.Equal(t, map[int]int{1: 42}, testTableContents(t, db))
+    assert.Equal(t, map[int]int{1: 42, 2: 43}, testTableContents(t, tx1))
+    assert.Equal(t, map[int]int{1: 42, 3: 44}, testTableContents(t, tx2))
+
+    // Finalize transactions
+    rollbackErr := errors.New("rollback")
+    err = tx1.Done(rollbackErr)
+    require.ErrorIs(t, err, rollbackErr)
+    err = tx2.Done(nil)
+    require.NoError(t, err)
+
+    // Check what's visible post-commit/rollback
+    assert.Equal(t, map[int]int{1: 42, 3: 44}, testTableContents(t, db))
+}
+
+func TestConcurrentTransactions(t *testing.T) {
+    t.Run("creating transactions concurrently does not fail", func(t *testing.T) {
+        shim := &captureShim{}
+        db := NewTestDBWithLogger(t, log.FromMinimalLogger(shim))
+        setupTestTransactionTable(t, db)
+
+        var g errgroup.Group
+        for i := 0; i < 10; i++ {
+            routine := i
+
+            g.Go(func() (err error) {
+                tx, err := db.Transact(context.Background())
+                if err != nil {
+                    return err
+                }
+                defer func() { err = tx.Done(err) }()
+
+                if err := tx.Exec(context.Background(), RawQuery(`SELECT pg_sleep(0.1)`)); err != nil {
+                    return err
+                }
+
+                return tx.Exec(context.Background(), Query(
+                    `INSERT INTO test (x, y) VALUES ({:routine}, {:routine})`,
+                    Args{"routine": routine},
+                ))
+            })
+        }
+
+        require.NoError(t, g.Wait())
+        assert.NotContains(t, strings.Join(shim.logs, "\n"), "transaction used concurrently")
+    })
+
+    t.Run("parallel insertion on a single transaction does not fail but logs an error", func(t *testing.T) {
+        shim := &captureShim{}
+        db := NewTestDBWithLogger(t, log.FromMinimalLogger(shim))
+        setupTestTransactionTable(t, db)
+
+        tx, err := db.Transact(context.Background())
+        require.NoError(t, err)
+        t.Cleanup(func() {
+            if err := tx.Done(err); err != nil {
+                require.NoError(t, err)
+            }
+        })
+
+        var g errgroup.Group
+        for i := 0; i < 10; i++ {
+            routine := i
+            g.Go(func() (err error) {
+                if err := tx.Exec(context.Background(), RawQuery(`SELECT pg_sleep(0.1);`)); err != nil {
+                    return err
+                }
+
+                return tx.Exec(context.Background(), Query(
+                    `INSERT INTO test (x, y) VALUES ({:routine}, {:routine})`,
+                    Args{"routine": routine},
+                ))
+            })
+        }
+
+        require.NoError(t, g.Wait())
+        assert.Contains(t, strings.Join(shim.logs, "\n"), "transaction used concurrently")
+    })
+}
+
+const numSavepointTests = 10
+
+func TestSavepoints(t *testing.T) {
+    for i := 0; i < numSavepointTests; i++ {
+        t.Run(fmt.Sprintf("i=%d", i), func(t *testing.T) {
+            db := NewTestDB(t)
+            setupTestTransactionTable(t, db)
+
+            // Make `n` nested transactions where the `i`th transaction is rolled back.
+            // Test that all of the actions in any savepoint after this index are also rolled back.
+            recurSavepoints(t, db, numSavepointTests, i)
+
+            expected := map[int]int{}
+            for j := numSavepointTests; j > i; j-- {
+                expected[j] = j * 2
+            }
+            assert.Equal(t, expected, testTableContents(t, db))
+        })
+    }
+}
+
+func recurSavepoints(t *testing.T, db DB, index, rollbackAt int) {
+    if index == 0 {
+        return
+    }
+
+    tx, err := db.Transact(context.Background())
+    require.NoError(t, err)
+    defer func() {
+        var doneErr error
+        if index == rollbackAt {
+            doneErr = errors.New("rollback")
+        }
+
+        err := tx.Done(doneErr)
+        require.ErrorIs(t, err, doneErr)
+    }()
+
+    require.NoError(t, tx.Exec(context.Background(), Query(
+        `INSERT INTO test (x, y) VALUES ({:index}, {:index} * 2)`,
+        Args{"index": index},
+    )))
+
+    recurSavepoints(t, tx, index-1, rollbackAt)
+}
+
+func setupTestTransactionTable(t *testing.T, db DB) {
+    require.NoError(t, db.Exec(context.Background(), RawQuery(`
+        CREATE TABLE test (
+            id SERIAL PRIMARY KEY,
+            x INTEGER NOT NULL,
+            y INTEGER NOT NULL
+        );
+    `)))
+}
+
+func testTableContents(t *testing.T, db DB) map[int]int {
+    pairs, err := scanTestPairs(db.Query(context.Background(), RawQuery(`SELECT x, y FROM test`)))
+    require.NoError(t, err)
+
+    pairsMap := make(map[int]int)
+    for _, p := range pairs {
+        pairsMap[p.x] = p.y
+    }
+
+    return pairsMap
+}
+
+//
+//
+
+type captureShim struct {
+    logs []string
+}
+
+func (n *captureShim) WithFields(log.LogFields) log.MinimalLogger {
+    return n
+}
+
+func (n *captureShim) LogWithFields(level log.LogLevel, fields log.LogFields, format string, args ...interface{}) {
+    n.logs = append(n.logs, fmt.Sprintf(format, args...))
+}
+
+func (n *captureShim) Sync() error {
+    return nil
+}
context.Context, q Q) (*sql.Rows, error) { + start := time.Now() + db.lock() + defer db.unlock() + + query, args := q.Format() + rows, err := db.db.QueryContext(ctx, query, args...) + logQuery(db.logger, time.Since(start), err, query, args) + return rows, err +} + +func (db *queryWrapper) Exec(ctx context.Context, q Q) error { + start := time.Now() + db.lock() + defer db.unlock() + + query, args := q.Format() + _, err := db.db.ExecContext(ctx, query, args...) + logQuery(db.logger, time.Since(start), err, query, args) + return err +} + +func (db *queryWrapper) lock() { + if db.mu == nil { + return + } + + if !db.mu.TryLock() { + start := time.Now() + db.mu.Lock() + logLockWait(db.logger, time.Since(start)) + } +} + +func (db *queryWrapper) unlock() { + if db.mu == nil { + return + } + + db.mu.Unlock() +} + +func logQuery(logger nacelle.Logger, duration time.Duration, err error, query string, args []any) { + fields := nacelle.LogFields{ + "query": query, + "args": args, + "err": err, + "duration": duration, + } + + logger.DebugWithFields(fields, "sql query executed") +} + +func logLockWait(logger nacelle.Logger, duration time.Duration) { + fields := nacelle.LogFields{ + "duration": duration, + } + + logger.WarningWithFields(fields, "transaction used concurrently") +} diff --git a/describe.go b/describe.go new file mode 100644 index 0000000..8fb5bf0 --- /dev/null +++ b/describe.go @@ -0,0 +1,76 @@ +package pgutil + +import ( + "context" +) + +type SchemaDescription struct { + Extensions []ExtensionDescription + Enums []EnumDescription + Functions []FunctionDescription + Tables []TableDescription + Sequences []SequenceDescription + Views []ViewDescription + Triggers []TriggerDescription + EnumDependencies []EnumDependency + ColumnDependencies []ColumnDependency +} + +func DescribeSchema(ctx context.Context, db DB) (SchemaDescription, error) { + extensions, err := DescribeExtensions(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + enums, err := DescribeEnums(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + functions, err := DescribeFunctions(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + tables, err := DescribeTables(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + sequences, err := DescribeSequences(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + views, err := DescribeViews(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + triggers, err := DescribeTriggers(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + enumDependencies, err := DescribeEnumDependencies(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + columnDependencies, err := DescribeColumnDependencies(ctx, db) + if err != nil { + return SchemaDescription{}, err + } + + return SchemaDescription{ + Extensions: extensions, + Enums: enums, + Functions: functions, + Tables: tables, + Sequences: sequences, + Views: views, + Triggers: triggers, + EnumDependencies: enumDependencies, + ColumnDependencies: columnDependencies, + }, nil +} diff --git a/describe_columns.go b/describe_columns.go new file mode 100644 index 0000000..a174510 --- /dev/null +++ b/describe_columns.go @@ -0,0 +1,134 @@ +package pgutil + +import ( + "context" + "fmt" +) + +type ColumnDescription struct { + Name string + Type string + IsNullable bool + Default string + CharacterMaximumLength int + IsIdentity bool + IdentityGeneration string + IsGenerated bool + GenerationExpression string +} + +func (d ColumnDescription) 
Equals(other ColumnDescription) bool { + return true && + d.Name == other.Name && + d.Type == other.Type && + d.IsNullable == other.IsNullable && + d.Default == other.Default && + d.CharacterMaximumLength == other.CharacterMaximumLength && + d.IsIdentity == other.IsIdentity && + d.IdentityGeneration == other.IdentityGeneration && + d.IsGenerated == other.IsGenerated && + d.GenerationExpression == other.GenerationExpression +} + +type column struct { + Namespace string + TableName string + Name string + Type string + IsNullable bool + Default *string + CharacterMaximumLength *int + IsIdentity bool + IdentityGeneration *string + IsGenerated bool + GenerationExpression *string +} + +var scanColumns = NewSliceScanner(func(s Scanner) (c column, _ error) { + var ( + isNullable string + isIdentity string + isGenerated string + ) + + err := s.Scan( + &c.Namespace, + &c.TableName, + &c.Name, + &c.Type, + &isNullable, + &c.Default, + &c.CharacterMaximumLength, + &isIdentity, + &c.IdentityGeneration, + &isGenerated, + &c.GenerationExpression, + ) + + c.IsNullable = truthy(isNullable) + c.IsIdentity = truthy(isIdentity) + c.IsGenerated = truthy(isGenerated) + return c, err +}) + +func describeColumns(ctx context.Context, db DB) (map[string][]ColumnDescription, error) { + columns, err := scanColumns(db.Query(ctx, RawQuery(` + SELECT + c.table_schema AS namespace, + c.table_name AS name, + c.column_name AS column_name, + CASE + WHEN c.data_type = 'ARRAY' THEN COALESCE(( + SELECT e.data_type + FROM information_schema.element_types e + WHERE + e.object_type = 'TABLE' AND + e.object_catalog = c.table_catalog AND + e.object_schema = c.table_schema AND + e.object_name = c.table_name AND + e.collection_type_identifier = c.dtd_identifier + )) || '[]' + WHEN c.data_type = 'USER-DEFINED' THEN c.udt_name + WHEN c.character_maximum_length != 0 THEN c.data_type || '(' || c.character_maximum_length::text || ')' + ELSE c.data_type + END AS type, + c.is_nullable AS is_nullable, + c.column_default AS default, + c.character_maximum_length AS character_maximum_length, + c.is_identity AS is_identity, + c.identity_generation AS identity_generation, + c.is_generated AS is_generated, + c.generation_expression AS generation_expression + FROM information_schema.columns c + JOIN information_schema.tables t ON + t.table_schema = c.table_schema AND + t.table_name = c.table_name + WHERE + t.table_type = 'BASE TABLE' AND + t.table_schema NOT LIKE 'pg_%' AND + t.table_schema != 'information_schema' + ORDER BY c.table_schema, c.table_name, c.column_name + `))) + if err != nil { + return nil, err + } + + columnMap := map[string][]ColumnDescription{} + for _, column := range columns { + key := fmt.Sprintf("%q.%q", column.Namespace, column.TableName) + + columnMap[key] = append(columnMap[key], ColumnDescription{ + Name: column.Name, + Type: column.Type, + IsNullable: column.IsNullable, + Default: deref(column.Default), + CharacterMaximumLength: deref(column.CharacterMaximumLength), + IsIdentity: column.IsIdentity, + IdentityGeneration: deref(column.IdentityGeneration), + IsGenerated: column.IsGenerated, + GenerationExpression: deref(column.GenerationExpression), + }) + } + + return columnMap, nil +} diff --git a/describe_constraints.go b/describe_constraints.go new file mode 100644 index 0000000..3cb5eff --- /dev/null +++ b/describe_constraints.go @@ -0,0 +1,89 @@ +package pgutil + +import ( + "context" + "fmt" +) + +type ConstraintDescription struct { + Name string + Type string + IsDeferrable bool + ReferencedTableName string + 
Definition string +} + +func (d ConstraintDescription) Equals(other ConstraintDescription) bool { + return true && + d.Name == other.Name && + d.Type == other.Type && + d.IsDeferrable == other.IsDeferrable && + d.ReferencedTableName == other.ReferencedTableName && + d.Definition == other.Definition +} + +type constraint struct { + Namespace string + TableName string + Name string + Type string + IsDeferrable *bool + ReferencedTableName *string + Definition string +} + +var scanConstraints = NewSliceScanner(func(s Scanner) (c constraint, _ error) { + err := s.Scan( + &c.Namespace, + &c.TableName, + &c.Name, + &c.Type, + &c.IsDeferrable, + &c.ReferencedTableName, + &c.Definition, + ) + return c, err +}) + +func describeConstraints(ctx context.Context, db DB) (map[string][]ConstraintDescription, error) { + constraints, err := scanConstraints(db.Query(ctx, RawQuery(` + SELECT + n.nspname AS namespace, + table_class.relname AS table_name, + con.conname AS name, + con.contype AS type, + con.condeferrable AS is_deferrable, + reftable_class.relname AS ref_table_name, + pg_catalog.pg_get_constraintdef(con.oid, true) AS definition + FROM pg_catalog.pg_constraint con + JOIN pg_catalog.pg_class table_class ON table_class.oid = con.conrelid + JOIN pg_catalog.pg_namespace n ON n.oid = table_class.relnamespace + LEFT OUTER JOIN pg_catalog.pg_class reftable_class ON reftable_class.oid = con.confrelid + WHERE + n.nspname NOT LIKE 'pg_%' AND + n.nspname != 'information_schema' AND + con.contype IN ('c', 'f', 't') + ORDER BY + n.nspname, + table_class.relname, + con.conname + `))) + if err != nil { + return nil, err + } + + constraintMap := map[string][]ConstraintDescription{} + for _, constraint := range constraints { + key := fmt.Sprintf("%q.%q", constraint.Namespace, constraint.TableName) + + constraintMap[key] = append(constraintMap[key], ConstraintDescription{ + Name: constraint.Name, + Type: constraint.Type, + IsDeferrable: deref(constraint.IsDeferrable), + ReferencedTableName: deref(constraint.ReferencedTableName), + Definition: constraint.Definition, + }) + } + + return constraintMap, nil +} diff --git a/describe_dependencies.go b/describe_dependencies.go new file mode 100644 index 0000000..596f8a8 --- /dev/null +++ b/describe_dependencies.go @@ -0,0 +1,91 @@ +package pgutil + +import "context" + +type EnumDependency struct { + EnumNamespace string + EnumName string + TableNamespace string + TableName string + ColumnName string +} + +var scanEnumDependencies = NewSliceScanner(func(s Scanner) (d EnumDependency, _ error) { + err := s.Scan( + &d.EnumNamespace, + &d.EnumName, + &d.TableNamespace, + &d.TableName, + &d.ColumnName, + ) + return d, err +}) + +func DescribeEnumDependencies(ctx context.Context, db DB) ([]EnumDependency, error) { + return scanEnumDependencies(db.Query(ctx, RawQuery(` + SELECT + ns.nspname AS enum_namespace, + col.udt_name AS enum_name, + col.table_schema AS table_namespace, + col.table_name AS table_name, + col.column_name AS column_name + FROM information_schema.columns col + JOIN information_schema.tables tab + ON + tab.table_schema = col.table_schema AND + tab.table_name = col.table_name AND + tab.table_type = 'BASE TABLE' + JOIN pg_type typ ON col.udt_name = typ.typname + JOIN pg_namespace ns ON ns.oid = typ.typnamespace + WHERE + col.table_schema NOT LIKE 'pg_%' AND + col.table_schema != 'information_schema' AND + typ.typtype = 'e' + ORDER BY col.table_schema, col.table_name, col.ordinal_position + `))) +} + +type ColumnDependency struct { + SourceNamespace string + 
SourceTableOrViewName string + SourceColumnName string + UsedNamespace string + UsedTableOrView string +} + +var scanColumnDependencies = NewSliceScanner(func(s Scanner) (d ColumnDependency, _ error) { + err := s.Scan( + &d.SourceNamespace, + &d.SourceTableOrViewName, + &d.SourceColumnName, + &d.UsedNamespace, + &d.UsedTableOrView, + ) + return d, err +}) + +func DescribeColumnDependencies(ctx context.Context, db DB) ([]ColumnDependency, error) { + return scanColumnDependencies(db.Query(ctx, RawQuery(` + SELECT + source_ns.nspname AS source_namespace, + source_table.relname AS source_table_or_view_name, + pg_attribute.attname AS source_column_name, + dependent_ns.nspname AS used_namespace, + dependent_view.relname AS used_table_or_view_name + FROM pg_depend + JOIN pg_rewrite ON pg_depend.objid = pg_rewrite.oid + JOIN pg_class AS dependent_view ON pg_rewrite.ev_class = dependent_view.oid + JOIN pg_class AS source_table ON pg_depend.refobjid = source_table.oid + JOIN pg_attribute ON + pg_depend.refobjid = pg_attribute.attrelid AND + pg_depend.refobjsubid = pg_attribute.attnum + JOIN pg_namespace dependent_ns ON dependent_ns.oid = dependent_view.relnamespace + JOIN pg_namespace source_ns ON source_ns.oid = source_table.relnamespace + WHERE + dependent_ns.nspname NOT LIKE 'pg_%' AND + dependent_ns.nspname != 'information_schema' AND + source_ns.nspname NOT LIKE 'pg_%' AND + source_ns.nspname != 'information_schema' + ORDER BY dependent_ns.nspname, dependent_view.relname + `))) +} diff --git a/describe_enums.go b/describe_enums.go new file mode 100644 index 0000000..91a9ee3 --- /dev/null +++ b/describe_enums.go @@ -0,0 +1,40 @@ +package pgutil + +import ( + "context" + "slices" + + "github.com/lib/pq" +) + +type EnumDescription struct { + Namespace string + Name string + Labels []string +} + +func (d EnumDescription) Equals(other EnumDescription) bool { + return true && + d.Namespace == other.Namespace && + d.Name == other.Name && + slices.Equal(d.Labels, other.Labels) +} + +var scanEnums = NewSliceScanner(func(s Scanner) (l EnumDescription, _ error) { + err := s.Scan(&l.Namespace, &l.Name, pq.Array(&l.Labels)) + return l, err +}) + +func DescribeEnums(ctx context.Context, db DB) ([]EnumDescription, error) { + return scanEnums(db.Query(ctx, RawQuery(` + SELECT + n.nspname AS namespace, + t.typname AS name, + array_agg(e.enumlabel ORDER BY e.enumsortorder) AS labels + FROM pg_catalog.pg_enum e + JOIN pg_catalog.pg_type t ON t.oid = e.enumtypid + JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + GROUP BY n.nspname, t.typname + ORDER BY n.nspname, t.typname + `))) +} diff --git a/describe_extensions.go b/describe_extensions.go new file mode 100644 index 0000000..6e3bfba --- /dev/null +++ b/describe_extensions.go @@ -0,0 +1,35 @@ +package pgutil + +import ( + "context" +) + +type ExtensionDescription struct { + Namespace string + Name string +} + +func (d ExtensionDescription) Equals(other ExtensionDescription) bool { + return true && + d.Namespace == other.Namespace && + d.Name == other.Name +} + +var scanExtensions = NewSliceScanner(func(s Scanner) (e ExtensionDescription, _ error) { + err := s.Scan(&e.Namespace, &e.Name) + return e, err +}) + +func DescribeExtensions(ctx context.Context, db DB) ([]ExtensionDescription, error) { + return scanExtensions(db.Query(ctx, RawQuery(` + SELECT + n.nspname AS namespace, + e.extname AS name + FROM pg_catalog.pg_extension e + JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace + WHERE + n.nspname NOT LIKE 'pg_%' AND +
n.nspname != 'information_schema' + ORDER BY n.nspname, e.extname + `))) +} diff --git a/describe_functions.go b/describe_functions.go new file mode 100644 index 0000000..761885a --- /dev/null +++ b/describe_functions.go @@ -0,0 +1,55 @@ +package pgutil + +import ( + "context" + "slices" + + "github.com/lib/pq" +) + +type FunctionDescription struct { + Namespace string + Name string + Definition string + ArgTypes []string +} + +func (d FunctionDescription) Equals(other FunctionDescription) bool { + return true && + d.Namespace == other.Namespace && + d.Name == other.Name && + slices.Equal(d.ArgTypes, other.ArgTypes) && + d.Definition == other.Definition +} + +var scanFunctions = NewSliceScanner(func(s Scanner) (f FunctionDescription, _ error) { + err := s.Scan(&f.Namespace, &f.Name, &f.Definition, pq.Array(&f.ArgTypes)) + return f, err +}) + +func DescribeFunctions(ctx context.Context, db DB) ([]FunctionDescription, error) { + return scanFunctions(db.Query(ctx, RawQuery(` + SELECT + n.nspname AS namespace, + p.proname AS name, + pg_get_functiondef(p.oid) AS definition, + COALESCE( + ARRAY( + SELECT typ.typname + FROM unnest(p.proargtypes) AS t(type_oid) + JOIN pg_type typ ON typ.oid = t.type_oid + ), + '{}'::text[]) AS argtypes + FROM pg_catalog.pg_proc p + JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace + JOIN pg_language l ON l.oid = p.prolang AND l.lanname IN ('sql', 'plpgsql') + WHERE + n.nspname NOT LIKE 'pg_%' AND + n.nspname != 'information_schema' AND + -- function is defined outside of any active extension + NOT EXISTS (SELECT 1 FROM pg_depend d WHERE d.objid = p.oid AND d.deptype = 'e') + ORDER BY + n.nspname, + p.proname + `))) +} diff --git a/describe_indexes.go b/describe_indexes.go new file mode 100644 index 0000000..a5dd4f5 --- /dev/null +++ b/describe_indexes.go @@ -0,0 +1,116 @@ +package pgutil + +import ( + "context" + "fmt" +) + +type IndexDescription struct { + Name string + IsPrimaryKey bool + IsUnique bool + IsExclusion bool + IsDeferrable bool + IndexDefinition string + ConstraintType string + ConstraintDefinition string +} + +func (d IndexDescription) Equals(other IndexDescription) bool { + return true && + d.Name == other.Name && + d.IsPrimaryKey == other.IsPrimaryKey && + d.IsUnique == other.IsUnique && + d.IsExclusion == other.IsExclusion && + d.IsDeferrable == other.IsDeferrable && + d.IndexDefinition == other.IndexDefinition && + d.ConstraintType == other.ConstraintType && + d.ConstraintDefinition == other.ConstraintDefinition + +} + +type index struct { + Namespace string + TableName string + Name string + IsPrimaryKey bool + IsUnique bool + IsExclusion *bool + IsDeferrable *bool + IndexDefinition string + ConstraintType *string + ConstraintDefinition *string +} + +var scanIndexes = NewSliceScanner(func(s Scanner) (i index, _ error) { + var ( + isPrimaryKey string + isUnique string + ) + + err := s.Scan( + &i.Namespace, + &i.TableName, + &i.Name, + &isPrimaryKey, + &isUnique, + &i.IsExclusion, + &i.IsDeferrable, + &i.IndexDefinition, + &i.ConstraintType, + &i.ConstraintDefinition, + ) + + i.IsPrimaryKey = truthy(isPrimaryKey) + i.IsUnique = truthy(isUnique) + return i, err +}) + +func describeIndexes(ctx context.Context, db DB) (map[string][]IndexDescription, error) { + indexes, err := scanIndexes(db.Query(ctx, RawQuery(` + SELECT + n.nspname AS namespace, + table_class.relname AS table_name, + index_class.relname AS name, + i.indisprimary AS is_primary_key, + i.indisunique AS is_unique, + i.indisexclusion AS is_exclusion, + con.condeferrable AS 
is_deferrable, + pg_catalog.pg_get_indexdef(i.indexrelid, 0, true) AS index_definition, + con.contype AS constraint_type, + pg_catalog.pg_get_constraintdef(con.oid, true) AS constraint_definition + FROM pg_catalog.pg_index i + JOIN pg_catalog.pg_class table_class ON table_class.oid = i.indrelid + JOIN pg_catalog.pg_class index_class ON index_class.oid = i.indexrelid + JOIN pg_catalog.pg_namespace n ON n.oid = table_class.relnamespace + LEFT OUTER JOIN pg_catalog.pg_constraint con ON + con.conrelid = i.indrelid AND + con.conindid = i.indexrelid AND + con.contype IN ('p', 'u', 'x') + WHERE + n.nspname NOT LIKE 'pg_%' AND + n.nspname != 'information_schema' + ORDER BY n.nspname, table_class.relname, index_class.relname + `))) + if err != nil { + return nil, err + } + + indexMap := map[string][]IndexDescription{} + for _, index := range indexes { + key := fmt.Sprintf("%q.%q", index.Namespace, index.TableName) + + indexMap[key] = append(indexMap[key], IndexDescription{ + Name: index.Name, + IsPrimaryKey: index.IsPrimaryKey, + IsUnique: index.IsUnique, + IsExclusion: deref(index.IsExclusion), + IsDeferrable: deref(index.IsDeferrable), + IndexDefinition: index.IndexDefinition, + ConstraintType: deref(index.ConstraintType), + ConstraintDefinition: deref(index.ConstraintDefinition), + }) + } + + return indexMap, nil +} diff --git a/describe_sequences.go b/describe_sequences.go new file mode 100644 index 0000000..80ad047 --- /dev/null +++ b/describe_sequences.go @@ -0,0 +1,61 @@ +package pgutil + +import ( + "context" +) + +type SequenceDescription struct { + Namespace string + Name string + Type string + StartValue int + MinimumValue int + MaximumValue int + Increment int + CycleOption string +} + +func (d SequenceDescription) Equals(other SequenceDescription) bool { + return true && + d.Namespace == other.Namespace && + d.Name == other.Name && + d.Type == other.Type && + d.StartValue == other.StartValue && + d.MinimumValue == other.MinimumValue && + d.MaximumValue == other.MaximumValue && + d.Increment == other.Increment && + d.CycleOption == other.CycleOption +} + +var scanSequences = NewSliceScanner(func(s Scanner) (l SequenceDescription, _ error) { + err := s.Scan( + &l.Namespace, + &l.Name, + &l.Type, + &l.StartValue, + &l.MinimumValue, + &l.MaximumValue, + &l.Increment, + &l.CycleOption, + ) + return l, err +}) + +func DescribeSequences(ctx context.Context, db DB) ([]SequenceDescription, error) { + return scanSequences(db.Query(ctx, RawQuery(` + SELECT + s.sequence_schema AS namespace, + s.sequence_name AS name, + s.data_type AS type, + s.start_value AS start_value, + s.minimum_value AS minimum_value, + s.maximum_value AS maximum_value, + s.increment AS increment, + s.cycle_option AS cycle_option + FROM information_schema.sequences s + WHERE + s.sequence_schema NOT LIKE 'pg_%' AND + s.sequence_schema != 'information_schema' + ORDER BY s.sequence_schema, s.sequence_name + `))) +} diff --git a/describe_tables.go b/describe_tables.go new file mode 100644 index 0000000..ae4a681 --- /dev/null +++ b/describe_tables.go @@ -0,0 +1,95 @@ +package pgutil + +import ( + "context" + "fmt" + "strings" +) + +type TableDescription struct { + Namespace string + Name string + Columns []ColumnDescription + Constraints []ConstraintDescription + Indexes []IndexDescription +} + +// Note: not a deep comparison +func (d TableDescription) Equals(other TableDescription) bool { + return true && + d.Namespace == other.Namespace && + d.Name == other.Name +} + +type table struct { + Namespace string + Name string +} + 
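+// Tables and their component maps below are correlated by a key of the
+// form fmt.Sprintf("%q.%q", namespace, name); a table "users" in schema
+// "public", for example, is keyed as "public"."users". A minimal usage
+// sketch, assuming a connected handle from Dial (or NewTestDB in tests):
+//
+//	tables, err := DescribeTables(ctx, db)
+//	if err != nil {
+//		return err
+//	}
+//	for _, t := range tables {
+//		fmt.Printf("%s.%s: %d columns\n", t.Namespace, t.Name, len(t.Columns))
+//	}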
+var scanTables = NewSliceScanner(func(s Scanner) (t table, _ error) { + err := s.Scan(&t.Namespace, &t.Name) + return t, err +}) + +func DescribeTables(ctx context.Context, db DB) ([]TableDescription, error) { + tables, err := scanTables(db.Query(ctx, RawQuery(` + SELECT + t.table_schema AS namespace, + t.table_name AS name + FROM information_schema.tables t + WHERE + t.table_type = 'BASE TABLE' AND + t.table_schema NOT LIKE 'pg_%' AND + t.table_schema != 'information_schema' + ORDER BY t.table_schema, t.table_name + `))) + if err != nil { + return nil, err + } + + columnMap, err := describeColumns(ctx, db) + if err != nil { + return nil, err + } + + constraintMap, err := describeConstraints(ctx, db) + if err != nil { + return nil, err + } + + indexMap, err := describeIndexes(ctx, db) + if err != nil { + return nil, err + } + + var hydratedTables []TableDescription + for _, table := range tables { + key := fmt.Sprintf("%q.%q", table.Namespace, table.Name) + + hydratedTables = append(hydratedTables, TableDescription{ + Namespace: table.Namespace, + Name: table.Name, + Columns: columnMap[key], + Constraints: constraintMap[key], + Indexes: indexMap[key], + }) + } + + return hydratedTables, nil +} + +// +// + +func truthy(value string) bool { + // truthy strings + SQL spec YES_NO + return strings.ToLower(value) == "yes" || strings.ToLower(value) == "true" +} + +func deref[T any](p *T) (v T) { + if p != nil { + v = *p + } + + return +} diff --git a/describe_test.go b/describe_test.go new file mode 100644 index 0000000..44419e9 --- /dev/null +++ b/describe_test.go @@ -0,0 +1,29 @@ +package pgutil + +import ( + "context" + "os" + "path" + "testing" + + "github.com/hexops/autogold/v2" + "github.com/stretchr/testify/require" +) + +func TestDescribeSchema(t *testing.T) { + var ( + goldenDir = path.Join("testdata", "golden") + schemaFile = path.Join("testdata", "schemas", "describe.sql") + ) + + schemaBytes, err := os.ReadFile(schemaFile) + require.NoError(t, err) + + db := NewTestDB(t) + err = db.Exec(context.Background(), RawQuery(string(schemaBytes))) + require.NoError(t, err) + + schema, err := DescribeSchema(context.Background(), db) + require.NoError(t, err) + autogold.ExpectFile(t, schema, autogold.Dir(goldenDir)) +} diff --git a/describe_triggers.go b/describe_triggers.go new file mode 100644 index 0000000..41ab25f --- /dev/null +++ b/describe_triggers.go @@ -0,0 +1,43 @@ +package pgutil + +import "context" + +type TriggerDescription struct { + Namespace string + Name string + TableName string + FunctionNamespace string + Definition string +} + +func (d TriggerDescription) Equals(other TriggerDescription) bool { + return true && + d.Namespace == other.Namespace && + d.Name == other.Name && + d.TableName == other.TableName && + d.FunctionNamespace == other.FunctionNamespace && + d.Definition == other.Definition +} + +var scanTriggers = NewSliceScanner(func(s Scanner) (t TriggerDescription, _ error) { + err := s.Scan(&t.Namespace, &t.Name, &t.TableName, &t.FunctionNamespace, &t.Definition) + return t, err +}) + +func DescribeTriggers(ctx context.Context, db DB) ([]TriggerDescription, error) { + return scanTriggers(db.Query(ctx, RawQuery(` + SELECT + n.nspname AS namespace, + t.tgname AS name, + c.relname AS table_name, + tn.nspname AS function_namespace, + pg_catalog.pg_get_triggerdef(t.oid, true) AS definition + FROM pg_catalog.pg_trigger t + JOIN pg_catalog.pg_class c ON c.oid = t.tgrelid + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + JOIN pg_catalog.pg_proc p ON p.oid = 
t.tgfoid + JOIN pg_catalog.pg_namespace tn ON tn.oid = p.pronamespace + WHERE NOT t.tgisinternal + ORDER BY n.nspname, t.tgname, c.relname, tn.nspname + `))) +} diff --git a/describe_views.go b/describe_views.go new file mode 100644 index 0000000..0b27f03 --- /dev/null +++ b/describe_views.go @@ -0,0 +1,37 @@ +package pgutil + +import ( + "context" +) + +type ViewDescription struct { + Namespace string + Name string + Definition string +} + +func (d ViewDescription) Equals(other ViewDescription) bool { + return true && + d.Namespace == other.Namespace && + d.Name == other.Name && + d.Definition == other.Definition +} + +var scanViews = NewSliceScanner(func(s Scanner) (v ViewDescription, _ error) { + err := s.Scan(&v.Namespace, &v.Name, &v.Definition) + return v, err +}) + +func DescribeViews(ctx context.Context, db DB) ([]ViewDescription, error) { + return scanViews(db.Query(ctx, RawQuery(` + SELECT + v.schemaname AS namespace, + v.viewname AS name, + v.definition AS definition + FROM pg_catalog.pg_views v + WHERE + v.schemaname NOT LIKE 'pg_%' AND + v.schemaname != 'information_schema' + ORDER BY v.schemaname, v.viewname + `))) +} diff --git a/dial.go b/dial.go new file mode 100644 index 0000000..85baf71 --- /dev/null +++ b/dial.go @@ -0,0 +1,34 @@ +package pgutil + +import ( + "database/sql" + "fmt" + "time" + + "github.com/go-nacelle/nacelle/v2" +) + +const MaxPingAttempts = 15 + +func Dial(url string, logger nacelle.Logger) (DB, error) { + db, err := sql.Open("postgres", url) + if err != nil { + return nil, fmt.Errorf("failed to connect to database (%s)", err) + } + + for attempts := 0; ; attempts++ { + err := db.Ping() + if err == nil { + break + } + + if attempts >= MaxPingAttempts { + return nil, fmt.Errorf("failed to ping database within timeout") + } + + logger.Error("Failed to ping database, will retry in 2s (%s)", err.Error()) + <-time.After(time.Second * 2) + } + + return newLoggingDB(db, logger), nil +} diff --git a/drift.go b/drift.go new file mode 100644 index 0000000..9c18a4f --- /dev/null +++ b/drift.go @@ -0,0 +1,388 @@ +package pgutil + +import ( + "cmp" + "fmt" + "slices" + "sort" + "strings" +) + +func Compare(a, b SchemaDescription) (statements []string) { + var ( + aTableComponentModifiers = NewTableComponentModifiers(a, a.Tables) + bTableComponentModifiers = NewTableComponentModifiers(b, b.Tables) + ) + + uniqueStatements := map[string]ddlStatement{} + for _, statements := range [][]ddlStatement{ + compareObjects(a, b, wrapWithContextValue(a, a.Extensions, NewExtensionModifier), wrapWithContextValue(b, b.Extensions, NewExtensionModifier)), + compareObjects(a, b, wrapWithContextValue(a, a.Enums, NewEnumModifier), wrapWithContextValue(b, b.Enums, NewEnumModifier)), + compareObjects(a, b, wrapWithContextValue(a, a.Functions, NewFunctionModifier), wrapWithContextValue(b, b.Functions, NewFunctionModifier)), + compareObjects(a, b, wrapWithContextValue(a, a.Tables, NewTableModifier), wrapWithContextValue(b, b.Tables, NewTableModifier)), + compareObjects(a, b, wrapWithContextValue(a, a.Sequences, NewSequenceModifier), wrapWithContextValue(b, b.Sequences, NewSequenceModifier)), + compareObjects(a, b, aTableComponentModifiers.Columns, bTableComponentModifiers.Columns), + compareObjects(a, b, aTableComponentModifiers.Constraints, bTableComponentModifiers.Constraints), + compareObjects(a, b, aTableComponentModifiers.Indexes, bTableComponentModifiers.Indexes), + compareObjects(a, b, wrapWithContextValue(a, a.Views, NewViewModifier), wrapWithContextValue(b, b.Views, 
NewViewModifier)), + compareObjects(a, b, wrapWithContextValue(a, a.Triggers, NewTriggerModifier), wrapWithContextValue(b, b.Triggers, NewTriggerModifier)), + } { + for _, statement := range statements { + key := strings.Join([]string{ + statement.statementType, + statement.objectType, + statement.key, + }, "::") + + uniqueStatements[key] = statement + } + } + + var unorderedStatements []ddlStatement + for _, statement := range uniqueStatements { + unorderedStatements = append(unorderedStatements, statement) + } + + filter := func(statementType, objectType string) []ddlStatement { + var filtered []ddlStatement + for _, stmt := range unorderedStatements { + if stmt.statementType == statementType && stmt.objectType == objectType { + filtered = append(filtered, stmt) + } + } + + return filtered + } + + // Dependency mapping: + // + // extensions : no dependencies + // enums : no dependencies + // functions : no dependencies + // tables : no dependencies + // sequences : no dependencies + // columns : depends on tables, enums, sequences + // constraints : depends on tables, columns + // indexes : depends on tables, columns + // views : depends on tables, columns, views + // triggers : depends on tables, columns, functions + + sortByKey := func(statements []ddlStatement) { + slices.SortFunc(statements, func(a, b ddlStatement) int { + return cmp.Compare(a.key, b.key) + }) + } + + sortByClosure := func(cls closure) func([]ddlStatement) { + return func(statements []ddlStatement) { + statementsByKey := map[string]ddlStatement{} + for _, stmt := range statements { + statementsByKey[stmt.key] = stmt + } + + // Build a graph where nodes are statement keys and edges are + // transitive references between them. Each edge is directed from + // a reference to the key whose closure set contains it. + graph := map[string]map[string]struct{}{} + for _, stmt := range statements { + // Ensure the graph contains all keys. + graph[stmt.key] = map[string]struct{}{} + } + for _, stmt := range statements { + for reference := range cls[stmt.key] { + if _, ok := graph[reference]; ok { + graph[reference][stmt.key] = struct{}{} + } + } + } + + // Build a topological ordering of the statements where ties + // are broken by keys in lexicographic order. + topologicalOrder := make([]ddlStatement, 0, len(statements)) + for len(graph) > 0 { + // Gather all keys with no remaining dependencies. + // + // The textbook implementation would use a min queue to quickly select the + // key with no adjacent edges, but the size of the data here should be small + // enough that scanning the (shrinking) graph on each iteration is fine. + var candidates []string + for key, edges := range graph { + if len(edges) == 0 { + candidates = append(candidates, key) + } + } + if len(candidates) == 0 { + panic("cycle detected in closure, cannot perform topological sort") + } + + // Select the next key and add it to the topological order. + sort.Strings(candidates) + top := candidates[0] + topologicalOrder = append(topologicalOrder, statementsByKey[top]) + + // Remove the key from the node and edge sets. + delete(graph, top) + for _, edges := range graph { + delete(edges, top) + } + } + + // Update the statements in-place to reflect the new order.
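+// For example, with a view "v2" defined over "v1", the graph above gains an
+// edge from "v2" to "v1", so "v1" is emitted (and created) before "v2"; keys
+// with no ordering constraint between them fall back to lexicographic order.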
+ for i := range statements { + statements[i] = topologicalOrder[i] + } + } + } + + createDependencyClosure, dropDependencyClosure := viewDependencyClosures(a, b) + sortCreateViews := sortByClosure(createDependencyClosure) + sortDropViews := sortByClosure(dropDependencyClosure) + + order := []struct { + statementType string + objectType string + order func(statements []ddlStatement) + }{ + {"drop", "trigger", sortByKey}, + {"drop", "view", sortDropViews}, + {"drop", "constraint", sortDropViews}, + {"drop", "index", sortByKey}, + {"drop", "column", sortByKey}, + {"drop", "sequence", sortByKey}, + {"drop", "table", sortByKey}, + {"drop", "function", sortByKey}, + {"drop", "enum", sortByKey}, + {"drop", "extension", sortByKey}, + {"create", "extension", sortByKey}, + {"create", "enum", sortByKey}, + {"replace", "enum", sortByKey}, + {"create", "function", sortByKey}, + {"replace", "function", sortByKey}, + {"create", "table", sortByKey}, + {"create", "sequence", sortByKey}, + {"replace", "sequence", sortByKey}, + {"create", "column", sortByKey}, + {"replace", "column", sortByKey}, + {"create", "index", sortByKey}, + {"create", "constraint", sortByKey}, + {"create", "view", sortCreateViews}, + {"create", "trigger", sortByKey}, + } + + for _, o := range order { + filtered := filter(o.statementType, o.objectType) + o.order(filtered) + + for _, statement := range filtered { + statements = append(statements, statement.statements...) + } + } + + return statements +} + +// +// +// + +func viewDependencyClosures(a, b SchemaDescription) (createDependencyClosure, dropDependencyClosure closure) { + createDependencyClosure = closure{} + for _, dependency := range a.ColumnDependencies { + sourceKey := fmt.Sprintf("%q.%q", dependency.SourceNamespace, dependency.SourceTableOrViewName) + dependencyKey := fmt.Sprintf("%q.%q", dependency.UsedNamespace, dependency.UsedTableOrView) + + if _, ok := createDependencyClosure[sourceKey]; !ok { + createDependencyClosure[sourceKey] = map[string]struct{}{} + } + + createDependencyClosure[sourceKey][dependencyKey] = struct{}{} + } + + dropDependencyClosure = closure{} + for _, dependency := range b.ColumnDependencies { + sourceKey := fmt.Sprintf("%q.%q", dependency.SourceNamespace, dependency.SourceTableOrViewName) + dependencyKey := fmt.Sprintf("%q.%q", dependency.UsedNamespace, dependency.UsedTableOrView) + + if _, ok := dropDependencyClosure[dependencyKey]; !ok { + dropDependencyClosure[dependencyKey] = map[string]struct{}{} + } + + dropDependencyClosure[dependencyKey][sourceKey] = struct{}{} + } + + transitiveClosure(createDependencyClosure) + transitiveClosure(dropDependencyClosure) + + return createDependencyClosure, dropDependencyClosure +} + +// +// + +// closure is a reference relationship mapping a key to a set of references. +type closure map[string]map[string]struct{} + +// transitiveClosure expands the given closure in-place to directly encode +// all transitive references. 
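+// For example, {"a": {"b"}, "b": {"c"}} expands in-place to
+// {"a": {"b", "c"}, "b": {"c"}}.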
+func transitiveClosure(cls closure) { + changed := true + for changed { + changed = false + + for _, references := range cls { + for oldReference := range references { + for newReference := range cls[oldReference] { + if _, ok := references[newReference]; !ok { + references[newReference] = struct{}{} + changed = true + } + } + } + } + } +} + +// +// +// + +type keyer interface { + Key() string +} + +type equaler[T any] interface { + Equals(T) bool +} + +type modifier[T equaler[T]] interface { + keyer + ObjectType() string + Description() T + Create() string + Drop() string +} + +type alterer[T any] interface { + AlterExisting(existingSchema SchemaDescription, existingObject T) ([]ddlStatement, bool) +} + +type ddlStatement struct { + key string + statementType string + objectType string + statements []string +} + +func newStatement(key string, statementType, objectType string, statements ...string) ddlStatement { + return ddlStatement{ + key: key, + statementType: statementType, + objectType: objectType, + statements: statements, + } +} + +func compareObjects[T equaler[T], M modifier[T]](a, b SchemaDescription, as, bs []M) (statements []ddlStatement) { + missing, additional, common := partition(as, bs) + + for _, modifier := range missing { + statements = append(statements, newStatement( + modifier.Key(), + "create", + modifier.ObjectType(), + modifier.Create(), + )) + } + + for _, modifier := range additional { + statements = append(statements, newStatement( + modifier.Key(), + "drop", + modifier.ObjectType(), + modifier.Drop(), + )) + } + + for _, pair := range common { + var ( + aModifier = pair.a + bModifier = pair.b + aDescription = aModifier.Description() + bDescription = bModifier.Description() + ) + + if aDescription.Equals(bDescription) { + continue + } + + if alterer, ok := any(aModifier).(alterer[T]); ok { + if alterStatements, ok := alterer.AlterExisting(b, bDescription); ok { + statements = append(statements, alterStatements...) 
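+ // The alterer produced in-place repair statements; skip the drop-and-recreate fallback below.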
+ continue + } + } + + statements = append(statements, newStatement(bModifier.Key(), "drop", bModifier.ObjectType(), bModifier.Drop())) + statements = append(statements, newStatement(aModifier.Key(), "create", aModifier.ObjectType(), aModifier.Create())) + } + + return statements +} + +// +// +// + +type pair[T any] struct { + a, b T +} + +// missing = present in a but not b +// additional = present in b but not a +func partition[T keyer](a, b []T) (missing, additional []T, common []pair[T]) { + aMap := map[string]T{} + for _, value := range a { + aMap[value.Key()] = value + } + + bMap := map[string]T{} + for _, value := range b { + bMap[value.Key()] = value + } + + for key, aValue := range aMap { + if bValue, ok := bMap[key]; ok { + common = append(common, pair[T]{aValue, bValue}) + } else { + missing = append(missing, aValue) + } + } + + for key, bValue := range bMap { + if _, ok := aMap[key]; !ok { + additional = append(additional, bValue) + } + } + + return missing, additional, common +} + +// +// +// + +func wrap[T, R any](s []T, f func(T) R) (wrapped []R) { + for _, value := range s { + wrapped = append(wrapped, f(value)) + } + + return wrapped +} + +func wrapWithContextValue[C, T, R any](c C, s []T, f func(C, T) R) []R { + return wrap(s, func(v T) R { return f(c, v) }) +} + +func wrapWithContextValues[C1, C2, T, R any](c1 C1, c2 C2, s []T, f func(C1, C2, T) R) []R { + return wrap(s, func(v T) R { return f(c1, c2, v) }) +} diff --git a/drift_columns.go b/drift_columns.go new file mode 100644 index 0000000..b6799ab --- /dev/null +++ b/drift_columns.go @@ -0,0 +1,87 @@ +package pgutil + +import ( + "fmt" +) + +type ColumnModifier struct { + t TableDescription + d ColumnDescription +} + +func NewColumnModifier(_ SchemaDescription, t TableDescription, d ColumnDescription) ColumnModifier { + return ColumnModifier{ + t: t, + d: d, + } +} + +func (m ColumnModifier) Key() string { + return fmt.Sprintf("%q.%q.%q", m.t.Namespace, m.t.Name, m.d.Name) +} + +func (m ColumnModifier) ObjectType() string { + return "column" +} + +func (m ColumnModifier) Description() ColumnDescription { + return m.d +} + +func (m ColumnModifier) Create() string { + nullableExpr := "" + if !m.d.IsNullable { + nullableExpr = " NOT NULL" + } + + defaultExpr := "" + if m.d.Default != "" { + defaultExpr = fmt.Sprintf(" DEFAULT %s", m.d.Default) + } + + return fmt.Sprintf("ALTER TABLE %q.%q ADD COLUMN IF NOT EXISTS %q %s%s%s;", m.t.Namespace, m.t.Name, m.d.Name, m.d.Type, nullableExpr, defaultExpr) +} + +func (m ColumnModifier) Drop() string { + return fmt.Sprintf("ALTER TABLE %q.%q DROP COLUMN IF EXISTS %q;", m.t.Namespace, m.t.Name, m.d.Name) +} + +func (m ColumnModifier) AlterExisting(existingSchema SchemaDescription, existingObject ColumnDescription) ([]ddlStatement, bool) { + statements := []string{} + alterColumn := func(format string, args ...any) { + statements = append(statements, fmt.Sprintf(fmt.Sprintf("ALTER TABLE %q.%q ALTER COLUMN %q %s;", m.t.Namespace, m.t.Name, m.d.Name, format), args...)) + } + + if m.d.Type != existingObject.Type { + alterColumn("SET DATA TYPE %s", m.d.Type) + } + if m.d.Default != existingObject.Default { + if m.d.Default == "" { + alterColumn("DROP DEFAULT") + } else { + alterColumn("SET DEFAULT %s", m.d.Default) + } + } + if m.d.IsNullable != existingObject.IsNullable { + if m.d.IsNullable { + alterColumn("DROP NOT NULL") + } else { + alterColumn("SET NOT NULL") + } + } + + // TODO - handle CharacterMaximumLength + // TODO - handle IsIdentity + // TODO - handle IdentityGeneration + 
// TODO - handle IsGenerated + // TODO - handle GenerationExpression + + return []ddlStatement{ + newStatement( + m.Key(), + "replace", + m.ObjectType(), + statements..., + ), + }, true +} diff --git a/drift_constraints.go b/drift_constraints.go new file mode 100644 index 0000000..93220b7 --- /dev/null +++ b/drift_constraints.go @@ -0,0 +1,35 @@ +package pgutil + +import "fmt" + +type ConstraintModifier struct { + t TableDescription + d ConstraintDescription +} + +func NewConstraintModifier(_ SchemaDescription, t TableDescription, d ConstraintDescription) ConstraintModifier { + return ConstraintModifier{ + t: t, + d: d, + } +} + +func (m ConstraintModifier) Key() string { + return fmt.Sprintf("%q.%q.%q", m.t.Namespace, m.t.Name, m.d.Name) +} + +func (m ConstraintModifier) ObjectType() string { + return "constraint" +} + +func (m ConstraintModifier) Description() ConstraintDescription { + return m.d +} + +func (m ConstraintModifier) Create() string { + return fmt.Sprintf("ALTER TABLE %q.%q ADD CONSTRAINT %q %s;", m.t.Namespace, m.t.Name, m.d.Name, m.d.Definition) +} + +func (m ConstraintModifier) Drop() string { + return fmt.Sprintf("ALTER TABLE %q.%q DROP CONSTRAINT IF EXISTS %q;", m.t.Namespace, m.t.Name, m.d.Name) +} diff --git a/drift_enums.go b/drift_enums.go new file mode 100644 index 0000000..c392b35 --- /dev/null +++ b/drift_enums.go @@ -0,0 +1,263 @@ +package pgutil + +import ( + "fmt" + "strings" +) + +type EnumModifier struct { + s SchemaDescription + d EnumDescription +} + +func NewEnumModifier(s SchemaDescription, d EnumDescription) EnumModifier { + return EnumModifier{ + s: s, + d: d, + } +} + +func (m EnumModifier) Key() string { + return fmt.Sprintf("%q.%q", m.d.Namespace, m.d.Name) +} + +func (m EnumModifier) ObjectType() string { + return "enum" +} + +func (m EnumModifier) Description() EnumDescription { + return m.d +} + +func (m EnumModifier) Create() string { + var quotedLabels []string + for _, label := range m.d.Labels { + quotedLabels = append(quotedLabels, enumQuote(label)) + } + + return fmt.Sprintf("CREATE TYPE %s AS ENUM (%s);", m.Key(), strings.Join(quotedLabels, ", ")) +} + +func (m EnumModifier) Drop() string { + return fmt.Sprintf("DROP TYPE IF EXISTS %s;", m.Key()) +} + +// NOTE: This depends on the order of the schema being modified. We must be certain that the order of +// drop/apply/create ensures that the columns here in the existing schema are still valid within the +// schema being altered. 
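+// See TestDrift_Enums for end-to-end examples of both repair strategies:
+// the in-place ALTER TYPE ... ADD VALUE reconstruction and the
+// rename-and-recreate fallback.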
+func (m EnumModifier) AlterExisting(existingSchema SchemaDescription, existingObject EnumDescription) ([]ddlStatement, bool) { + if reconstruction, ok := unifyLabels(m.d.Labels, existingObject.Labels); ok { + return m.alterViaReconstruction(reconstruction) + } + + return m.alterViaDropAndRecreate(existingSchema) +} + +func (m EnumModifier) alterViaReconstruction(reconstruction []missingLabel) ([]ddlStatement, bool) { + var statements []string + for _, missingLabel := range reconstruction { + relativeTo := "" + if missingLabel.Next != nil { + relativeTo = fmt.Sprintf("BEFORE %s", enumQuote(*missingLabel.Next)) + } else { + relativeTo = fmt.Sprintf("AFTER %s", enumQuote(*missingLabel.Prev)) + } + + statements = append(statements, fmt.Sprintf("ALTER TYPE %q.%q ADD VALUE %s %s;", m.d.Namespace, m.d.Name, enumQuote(missingLabel.Label), relativeTo)) + } + + return []ddlStatement{ + newStatement( + m.Key(), + "replace", + m.ObjectType(), + statements..., + ), + }, true +} + +func (m EnumModifier) alterViaDropAndRecreate(existingSchema SchemaDescription) ([]ddlStatement, bool) { + // Basic plan: + // 1. Rename the existing enum type. + // 2. Create the new enum type with the old name. + // 3. Drop all views that depend (transitively) on the enum type. + // 4. Drop defaults on columns that reference the enum type. + // 5. Alter column types to reference the new enum type. + // 6. Re-add any defaults that were dropped. + // 7. Recreate the views that were dropped. + // 8. Drop the old enum type. + // + // NOTE: View statements are ordered by `Compare`. We do, however, need to be cautious + // of the order in which we modify the enums and tables. + + // Select the dependencies that are relevant to the enum type we're modifying. + var dependencies []EnumDependency + for _, dependency := range existingSchema.EnumDependencies { + if dependency.EnumNamespace == m.d.Namespace && dependency.EnumName == m.d.Name { + dependencies = append(dependencies, dependency) + } + } + + // Calculate the transitive dependencies for all views in the current schema. + createDependencyClosure, _ := viewDependencyClosures(existingSchema, SchemaDescription{}) + + // Collect the set of views referencing a table with a column of the enum type. + var views []string + for _, dependency := range dependencies { + for key := range createDependencyClosure[fmt.Sprintf("%q.%q", dependency.TableNamespace, dependency.TableName)] { + views = append(views, key) + } + } + + // Generate ALTER TABLE statements for each table with a column of the enum type. 
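+ // For a column "m" whose default is 'sad'::s.mood, for example, this emits
+ // a single statement of the form:
+ //
+ //	ALTER TABLE "s"."t" ALTER COLUMN "m" DROP DEFAULT,
+ //	    ALTER COLUMN "m" TYPE "s"."mood" USING ("m"::text::"s"."mood"),
+ //	    ALTER COLUMN "m" SET DEFAULT 'sad'::s.mood;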
+ var alterTableStatements []string + for _, dependency := range dependencies { + defaultValue := getDefaultValue( + existingSchema.Tables, + dependency.TableNamespace, + dependency.TableName, + dependency.ColumnName, + ) + + var alterTableActions []string + if defaultValue != "" { + alterTableActions = append(alterTableActions, fmt.Sprintf( + "ALTER COLUMN %q DROP DEFAULT", + dependency.ColumnName, + )) + } + + alterTableActions = append(alterTableActions, fmt.Sprintf( + "ALTER COLUMN %q TYPE %s USING (%q::text::%s)", + dependency.ColumnName, + m.Key(), + dependency.ColumnName, + m.Key(), + )) + + if defaultValue != "" { + alterTableActions = append(alterTableActions, fmt.Sprintf( + "ALTER COLUMN %q SET DEFAULT %s", + dependency.ColumnName, + defaultValue, + )) + } + + alterTableStatements = append(alterTableStatements, fmt.Sprintf( + "ALTER TABLE %q.%q %s;", + dependency.TableNamespace, + dependency.TableName, + strings.Join(alterTableActions, ", "), + )) + } + + // Generate DROP/CREATE VIEW statements for each view that references the enum type. + var viewStatements []ddlStatement + for _, viewKey := range views { + viewStatements = append(viewStatements, newStatement( + viewKey, + "drop", + "view", + fmt.Sprintf("DROP VIEW IF EXISTS %s;", viewKey), + )) + + // Look for the view in the new schema (which may have been dropped or modified). If the view + // exists and has the SAME definition, then we need to be sure to issue a recreation statement, + // otherwise we've dropped the view as an unintentional side-effect. If the view exists and has + // a different definition, then we don't need to recreate the view because it will be recreated + // as part of the normal view drift repair. + + var existingDefinition string + for _, view := range existingSchema.Views { + if fmt.Sprintf("%q.%q", view.Namespace, view.Name) == viewKey { + existingDefinition = view.Definition + } + } + + for _, view := range m.s.Views { + if viewKey == fmt.Sprintf("%q.%q", view.Namespace, view.Name) && view.Definition == existingDefinition { + viewStatements = append(viewStatements, newStatement( + viewKey, + "create", + "view", + fmt.Sprintf("CREATE OR REPLACE VIEW %s AS %s", viewKey, strings.TrimSpace(stripIdent(" "+existingDefinition))), + )) + } + } + } + + // Construct enum replacement statements. + var enumStatements []string + enumStatements = append(enumStatements, fmt.Sprintf("ALTER TYPE %q.%q RENAME TO %q;", m.d.Namespace, m.d.Name, m.d.Name+"_bak")) + enumStatements = append(enumStatements, m.Create()) + enumStatements = append(enumStatements, alterTableStatements...)
+ enumStatements = append(enumStatements, fmt.Sprintf("DROP TYPE %q.%q;", m.d.Namespace, m.d.Name+"_bak")) + + return append(viewStatements, newStatement( + m.Key(), + "replace", + m.ObjectType(), + enumStatements..., + )), true +} + +func enumQuote(label string) string { + return fmt.Sprintf("'%s'", strings.ReplaceAll(label, "'", "''")) +} + +func getDefaultValue(tables []TableDescription, namespace, tableName, columnName string) string { + for _, table := range tables { + if table.Namespace == namespace && table.Name == tableName { + for _, c := range table.Columns { + if c.Name == columnName { + return c.Default + } + } + } + } + + return "" +} + +type missingLabel struct { + Label string + Prev *string + Next *string +} + +func unifyLabels(expectedLabels, existingLabels []string) (reconstruction []missingLabel, _ bool) { + var ( + j = 0 + missingIndexMap = map[int]struct{}{} + ) + + for i, label := range expectedLabels { + if j < len(existingLabels) && existingLabels[j] == label { + j++ + } else if i > 0 { + missingIndexMap[i] = struct{}{} + } + } + + if j < len(existingLabels) { + return nil, false + } + + if expectedLabels[0] != existingLabels[0] { + reconstruction = append(reconstruction, missingLabel{ + Label: expectedLabels[0], + Next: &existingLabels[0], + }) + } + + for i, label := range expectedLabels { + if _, ok := missingIndexMap[i]; ok { + reconstruction = append(reconstruction, missingLabel{ + Label: label, + Prev: &expectedLabels[i-1], + }) + } + } + + return reconstruction, true +} diff --git a/drift_enums_test.go b/drift_enums_test.go new file mode 100644 index 0000000..7619ccc --- /dev/null +++ b/drift_enums_test.go @@ -0,0 +1,76 @@ +package pgutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUnifyLabels(t *testing.T) { + for _, testCase := range []struct { + name string + expectedLabels []string + existingLabels []string + valid bool + reconstruction []missingLabel + }{ + { + name: "mismatched", + expectedLabels: []string{"foo", "bar", "baz"}, + existingLabels: []string{"foo", "baz", "bonk"}, + valid: false, + }, + { + name: "inversions", + expectedLabels: []string{"foo", "bar", "baz"}, + existingLabels: []string{"baz", "bar"}, + valid: false, + }, + + { + name: "missing at end", + expectedLabels: []string{"foo", "bar", "baz", "bonk"}, + existingLabels: []string{"foo", "bar"}, + valid: true, + reconstruction: []missingLabel{ + {Label: "baz", Prev: ptr("bar")}, + {Label: "bonk", Prev: ptr("baz")}, + }, + }, + { + name: "missing in middle", + expectedLabels: []string{"foo", "bar", "baz", "bonk"}, + existingLabels: []string{"foo", "bonk"}, + valid: true, + reconstruction: []missingLabel{ + {Label: "bar", Prev: ptr("foo")}, + {Label: "baz", Prev: ptr("bar")}, + }, + }, + { + name: "missing at beginning", + expectedLabels: []string{"foo", "bar", "baz", "bonk"}, + existingLabels: []string{"baz", "bonk"}, + valid: true, + reconstruction: []missingLabel{ + {Label: "foo", Next: ptr("baz")}, + {Label: "bar", Prev: ptr("foo")}, + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + reconstruction, valid := unifyLabels(testCase.expectedLabels, testCase.existingLabels) + if testCase.valid { + require.True(t, valid) + assert.Equal(t, testCase.reconstruction, reconstruction) + } else { + require.False(t, valid) + } + }) + } +} + +func ptr[S any](v S) *S { + return &v +} diff --git a/drift_extensions.go b/drift_extensions.go new file mode 100644 index 0000000..45b763d --- /dev/null +++ 
b/drift_extensions.go @@ -0,0 +1,35 @@ +package pgutil + +import ( + "fmt" +) + +type ExtensionModifier struct { + d ExtensionDescription +} + +func NewExtensionModifier(_ SchemaDescription, d ExtensionDescription) ExtensionModifier { + return ExtensionModifier{ + d: d, + } +} + +func (m ExtensionModifier) Key() string { + return fmt.Sprintf("%q.%q", m.d.Namespace, m.d.Name) +} + +func (m ExtensionModifier) ObjectType() string { + return "extension" +} + +func (m ExtensionModifier) Description() ExtensionDescription { + return m.d +} + +func (m ExtensionModifier) Create() string { + return fmt.Sprintf("CREATE EXTENSION IF NOT EXISTS %q WITH SCHEMA %q;", m.d.Name, m.d.Namespace) +} + +func (m ExtensionModifier) Drop() string { + return fmt.Sprintf("DROP EXTENSION IF EXISTS %q;", m.d.Name) +} diff --git a/drift_functions.go b/drift_functions.go new file mode 100644 index 0000000..0c66ed8 --- /dev/null +++ b/drift_functions.go @@ -0,0 +1,47 @@ +package pgutil + +import ( + "fmt" + "strings" +) + +type FunctionModifier struct { + d FunctionDescription +} + +func NewFunctionModifier(_ SchemaDescription, d FunctionDescription) FunctionModifier { + return FunctionModifier{ + d: d, + } +} + +func (m FunctionModifier) Key() string { + return fmt.Sprintf("%q.%q(%s)", m.d.Namespace, m.d.Name, strings.Join(m.d.ArgTypes, ", ")) +} + +func (m FunctionModifier) ObjectType() string { + return "function" +} + +func (m FunctionModifier) Description() FunctionDescription { + return m.d +} + +func (m FunctionModifier) Create() string { + return fmt.Sprintf("%s;", m.d.Definition) +} + +func (m FunctionModifier) Drop() string { + return fmt.Sprintf("DROP FUNCTION IF EXISTS %s;", m.Key()) +} + +func (m FunctionModifier) AlterExisting(_ SchemaDescription, _ FunctionDescription) ([]ddlStatement, bool) { + return []ddlStatement{ + newStatement( + m.Key(), + "create", + m.ObjectType(), + m.Create(), + ), + }, true +} diff --git a/drift_indexes.go b/drift_indexes.go new file mode 100644 index 0000000..bd904f5 --- /dev/null +++ b/drift_indexes.go @@ -0,0 +1,47 @@ +package pgutil + +import "fmt" + +type IndexModifier struct { + t TableDescription + d IndexDescription +} + +func NewIndexModifier(_ SchemaDescription, t TableDescription, d IndexDescription) IndexModifier { + return IndexModifier{ + t: t, + d: d, + } +} + +func (m IndexModifier) Key() string { + return fmt.Sprintf("%q.%q.%q", m.t.Namespace, m.t.Name, m.d.Name) +} + +func (m IndexModifier) ObjectType() string { + return "index" +} + +func (m IndexModifier) Description() IndexDescription { + return m.d +} + +func (m IndexModifier) Create() string { + if m.isConstraint() { + return fmt.Sprintf("ALTER TABLE %q.%q ADD CONSTRAINT %q %s;", m.t.Namespace, m.t.Name, m.d.Name, m.d.ConstraintDefinition) + } + + return fmt.Sprintf("%s;", m.d.IndexDefinition) +} + +func (m IndexModifier) Drop() string { + if m.isConstraint() { + return fmt.Sprintf("ALTER TABLE %q.%q DROP CONSTRAINT IF EXISTS %q;", m.t.Namespace, m.t.Name, m.d.Name) + } + + return fmt.Sprintf("DROP INDEX IF EXISTS %q.%q;", m.t.Namespace, m.d.Name) +} + +func (m IndexModifier) isConstraint() bool { + return m.d.ConstraintType == "u" || m.d.ConstraintType == "p" +} diff --git a/drift_sequences.go b/drift_sequences.go new file mode 100644 index 0000000..69071bf --- /dev/null +++ b/drift_sequences.go @@ -0,0 +1,92 @@ +package pgutil + +import ( + "fmt" + "strings" +) + +type SequenceModifier struct { + d SequenceDescription +} + +func NewSequenceModifier(_ SchemaDescription, d SequenceDescription) 
SequenceModifier { + return SequenceModifier{ + d: d, + } +} + +func (m SequenceModifier) Key() string { + return fmt.Sprintf("%q.%q", m.d.Namespace, m.d.Name) +} + +func (m SequenceModifier) ObjectType() string { + return "sequence" +} + +func (m SequenceModifier) Description() SequenceDescription { + return m.d +} + +func (m SequenceModifier) Create() string { + minValue := "NO MINVALUE" + if m.d.MinimumValue != 0 { + minValue = fmt.Sprintf("MINVALUE %d", m.d.MinimumValue) + } + + maxValue := "NO MAXVALUE" + if m.d.MaximumValue != 0 { + maxValue = fmt.Sprintf("MAXVALUE %d", m.d.MaximumValue) + } + + // information_schema reports cycle_option as YES/NO; translate it into + // the CYCLE / NO CYCLE clause expected by CREATE SEQUENCE. + cycle := "NO CYCLE" + if m.d.CycleOption == "YES" { + cycle = "CYCLE" + } + + return fmt.Sprintf( + "CREATE SEQUENCE IF NOT EXISTS %s AS %s INCREMENT BY %d %s %s START WITH %d %s;", + m.Key(), + m.d.Type, + m.d.Increment, + minValue, + maxValue, + m.d.StartValue, + cycle, + ) +} + +func (m SequenceModifier) Drop() string { + return fmt.Sprintf("DROP SEQUENCE IF EXISTS %s;", m.Key()) +} + +func (m SequenceModifier) AlterExisting(existingSchema SchemaDescription, existingObject SequenceDescription) ([]ddlStatement, bool) { + parts := []string{ + fmt.Sprintf("ALTER SEQUENCE IF EXISTS %s", m.Key()), + } + if m.d.Type != existingObject.Type { + parts = append(parts, fmt.Sprintf("AS %s", m.d.Type)) + } + if m.d.Increment != existingObject.Increment { + parts = append(parts, fmt.Sprintf("INCREMENT BY %d", m.d.Increment)) + } + if m.d.MinimumValue != existingObject.MinimumValue { + parts = append(parts, fmt.Sprintf("MINVALUE %d", m.d.MinimumValue)) + } + if m.d.MaximumValue != existingObject.MaximumValue { + parts = append(parts, fmt.Sprintf("MAXVALUE %d", m.d.MaximumValue)) + } + if m.d.StartValue != existingObject.StartValue { + parts = append(parts, fmt.Sprintf("START WITH %d", m.d.StartValue)) + } + if m.d.CycleOption != existingObject.CycleOption { + if m.d.CycleOption == "YES" { + parts = append(parts, "CYCLE") + } else { + parts = append(parts, "NO CYCLE") + } + } + + return []ddlStatement{ + newStatement( + m.Key(), + "replace", + m.ObjectType(), + strings.Join(parts, " ")+";", + ), + }, true +} diff --git a/drift_tables.go b/drift_tables.go new file mode 100644 index 0000000..825240c --- /dev/null +++ b/drift_tables.go @@ -0,0 +1,59 @@ +package pgutil + +import "fmt" + +type TableModifier struct { + d TableDescription +} + +func NewTableModifier(_ SchemaDescription, d TableDescription) TableModifier { + return TableModifier{ + d: d, + } +} + +func (m TableModifier) Key() string { + return fmt.Sprintf("%q.%q", m.d.Namespace, m.d.Name) +} + +func (m TableModifier) ObjectType() string { + return "table" +} + +func (m TableModifier) Description() TableDescription { + return m.d +} + +func (m TableModifier) Create() string { + return fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s();", m.Key()) +} + +func (m TableModifier) Drop() string { + return fmt.Sprintf("DROP TABLE IF EXISTS %s;", m.Key()) +} + +type TableComponentModifiers struct { + Columns []ColumnModifier + Constraints []ConstraintModifier + Indexes []IndexModifier +} + +func NewTableComponentModifiers(schema SchemaDescription, tables []TableDescription) TableComponentModifiers { + var ( + columns []ColumnModifier + constraints []ConstraintModifier + indexes []IndexModifier + ) + + for _, table := range tables { + columns = append(columns, wrapWithContextValues(schema, table, table.Columns, NewColumnModifier)...) + constraints = append(constraints, wrapWithContextValues(schema, table, table.Constraints, NewConstraintModifier)...)
+ indexes = append(indexes, wrapWithContextValues(schema, table, table.Indexes, NewIndexModifier)...) + } + + return TableComponentModifiers{ + Columns: columns, + Constraints: constraints, + Indexes: indexes, + } +} diff --git a/drift_test.go b/drift_test.go new file mode 100644 index 0000000..fef8435 --- /dev/null +++ b/drift_test.go @@ -0,0 +1,858 @@ +package pgutil + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDrift_Extensions(t *testing.T) { + t.Run("create", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{`CREATE EXTENSION hstore;`}, + Alter: []string{`DROP EXTENSION hstore;`}, + Expected: []string{`CREATE EXTENSION IF NOT EXISTS "hstore" WITH SCHEMA "public";`}, + }) + }) + + t.Run("drop", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{}, + Alter: []string{`CREATE EXTENSION pg_trgm;`}, + Expected: []string{`DROP EXTENSION IF EXISTS "pg_trgm";`}, + }) + }) +} + +func TestDrift_Enums(t *testing.T) { + + t.Run("create", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{`CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');`}, + Alter: []string{`DROP TYPE mood;`}, + Expected: []string{`CREATE TYPE "public"."mood" AS ENUM ('sad', 'ok', 'happy');`}, + }) + + t.Run("escaped values", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{`CREATE TYPE spell_check AS ENUM ('there', 'their', 'they''re');`}, + Alter: []string{`DROP TYPE spell_check;`}, + Expected: []string{`CREATE TYPE "public"."spell_check" AS ENUM ('there', 'their', 'they''re');`}, + }) + }) + }) + + t.Run("drop", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{}, + Alter: []string{`CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');`}, + Expected: []string{`DROP TYPE IF EXISTS "public"."mood";`}, + }) + }) + + t.Run("alter", func(t *testing.T) { + t.Run("missing labels", func(t *testing.T) { + for _, testCase := range []struct { + name string + expectedLabels []string + existingLabels []string + expectedQueries []string + }{ + { + name: "missing at end", + expectedLabels: []string{"foo", "bar", "baz", "bonk"}, + existingLabels: []string{"foo", "bar"}, + expectedQueries: []string{ + `ALTER TYPE "public"."mood" ADD VALUE 'baz' AFTER 'bar';`, + `ALTER TYPE "public"."mood" ADD VALUE 'bonk' AFTER 'baz';`, + }, + }, + { + name: "missing in middle", + expectedLabels: []string{"foo", "bar", "baz", "bonk"}, + existingLabels: []string{"foo", "bonk"}, + expectedQueries: []string{ + `ALTER TYPE "public"."mood" ADD VALUE 'bar' AFTER 'foo';`, + `ALTER TYPE "public"."mood" ADD VALUE 'baz' AFTER 'bar';`, + }, + }, + { + name: "missing at beginning", + expectedLabels: []string{"foo", "bar", "baz", "bonk"}, + existingLabels: []string{"baz", "bonk"}, + expectedQueries: []string{ + `ALTER TYPE "public"."mood" ADD VALUE 'foo' BEFORE 'baz';`, + `ALTER TYPE "public"."mood" ADD VALUE 'bar' AFTER 'foo';`, + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + // Prepare setup queries + setupLabels := make([]string, len(testCase.expectedLabels)) + for i, label := range testCase.expectedLabels { + setupLabels[i] = fmt.Sprintf("'%s'", label) + } + setupQuery := fmt.Sprintf("CREATE TYPE mood AS ENUM (%s);", strings.Join(setupLabels, ", ")) + + // Prepare alter queries + existingLabelsFormatted := make([]string, len(testCase.existingLabels)) + for i, label := range testCase.existingLabels { + existingLabelsFormatted[i] = fmt.Sprintf("'%s'", label) + } + 
alterQuery := fmt.Sprintf(` + DROP TYPE mood; + CREATE TYPE mood AS ENUM (%s); + `, strings.Join(existingLabelsFormatted, ", ")) + + // Execute testDrift with the new struct + testDrift(t, DriftTestCase{ + Setup: []string{setupQuery}, + Alter: []string{alterQuery}, + Expected: testCase.expectedQueries, + }) + }) + } + + t.Run("escaped values", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE TYPE spell_check AS ENUM ('they''re', 'there', 'their', 'whose', 'who''s');`, + }, + Alter: []string{ + `DROP TYPE spell_check;`, + `CREATE TYPE spell_check AS ENUM ('they''re', 'their', 'whose');`, + }, + Expected: []string{ + `ALTER TYPE "public"."spell_check" ADD VALUE 'there' AFTER 'they''re';`, + `ALTER TYPE "public"."spell_check" ADD VALUE 'who''s' AFTER 'whose';`, + }, + }) + }) + }) + + t.Run("non-repairable labels", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');`, + }, + Alter: []string{ + `DROP TYPE mood;`, + `CREATE TYPE mood AS ENUM ('happy', 'sad', 'ok', 'gleeful');`, + }, + Expected: []string{ + `ALTER TYPE "public"."mood" RENAME TO "mood_bak";`, + `CREATE TYPE "public"."mood" AS ENUM ('sad', 'ok', 'happy');`, + `DROP TYPE "public"."mood_bak";`, + }, + }) + }) + + t.Run("updates column types", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');`, + `CREATE TABLE t (m mood);`, + }, + Alter: []string{ + `DROP TABLE t;`, + `DROP TYPE mood;`, + `CREATE TYPE mood AS ENUM ('happy', 'sad', 'ok', 'gleeful');`, + `CREATE TABLE t (m mood);`, + }, + Expected: []string{ + `ALTER TYPE "public"."mood" RENAME TO "mood_bak";`, + `CREATE TYPE "public"."mood" AS ENUM ('sad', 'ok', 'happy');`, + `ALTER TABLE "public"."t" ALTER COLUMN "m" TYPE "public"."mood" USING ("m"::text::"public"."mood");`, + `DROP TYPE "public"."mood_bak";`, + }, + }) + }) + + t.Run("updates column defaults", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE SCHEMA s;`, + `CREATE TYPE s.mood AS ENUM ('sad', 'ok', 'happy');`, + `CREATE TABLE s.t (m s.mood DEFAULT 'sad');`, + }, + Alter: []string{ + `DROP TABLE s.t;`, + `DROP TYPE s.mood;`, + `CREATE TYPE s.mood AS ENUM ('happy', 'sad', 'ok', 'gleeful');`, + `CREATE TABLE s.t (m s.mood DEFAULT 'sad');`, + }, + Expected: []string{ + `ALTER TYPE "s"."mood" RENAME TO "mood_bak";`, + `CREATE TYPE "s"."mood" AS ENUM ('sad', 'ok', 'happy');`, + `ALTER TABLE "s"."t" ALTER COLUMN "m" DROP DEFAULT, ALTER COLUMN "m" TYPE "s"."mood" USING ("m"::text::"s"."mood"), ALTER COLUMN "m" SET DEFAULT 'sad'::s.mood;`, + `DROP TYPE "s"."mood_bak";`, + }, + }) + }) + + t.Run("temporarily drops dependent views", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');`, + `CREATE TABLE t (m mood);`, + `CREATE VIEW v1 AS SELECT m FROM t;`, + `CREATE VIEW v2 AS SELECT m FROM v1;`, + }, + Alter: []string{ + `DROP VIEW v2;`, + `DROP VIEW v1;`, + `DROP TABLE t;`, + `DROP TYPE mood;`, + `CREATE TYPE mood AS ENUM ('happy', 'sad', 'ok', 'gleeful');`, + `CREATE TABLE t (m mood);`, + `CREATE VIEW v1 AS SELECT m FROM t;`, + `CREATE VIEW v2 AS SELECT m FROM v1;`, + }, + Expected: []string{ + `DROP VIEW IF EXISTS "public"."v2";`, + `DROP VIEW IF EXISTS "public"."v1";`, + `ALTER TYPE "public"."mood" RENAME TO "mood_bak";`, + `CREATE TYPE "public"."mood" AS ENUM ('sad', 'ok', 'happy');`, + `ALTER TABLE "public"."t" ALTER COLUMN "m" TYPE "public"."mood" USING 
("m"::text::"public"."mood");`, + `DROP TYPE "public"."mood_bak";`, + `CREATE OR REPLACE VIEW "public"."v1" AS SELECT m + FROM t;`, + `CREATE OR REPLACE VIEW "public"."v2" AS SELECT m + FROM v1;`, + }, + }) + }) + }) +} + +var postgresAddFunctionDefinition = `CREATE OR REPLACE FUNCTION public.add(integer, integer) + RETURNS integer + LANGUAGE sql +AS $function$SELECT $1 + $2;$function$ +;` + +func TestDrift_Functions(t *testing.T) { + t.Run("create", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{`CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL;`}, + Alter: []string{`DROP FUNCTION add(integer, integer);`}, + Expected: []string{postgresAddFunctionDefinition}, + }) + }) + + t.Run("drop", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{}, + Alter: []string{`CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL;`}, + Expected: []string{`DROP FUNCTION IF EXISTS "public"."add"(int4, int4);`}, + }) + }) + + t.Run("alter", func(t *testing.T) { + t.Run("mismatched definition", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{`CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL;`}, + Alter: []string{`CREATE OR REPLACE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 - $2;' LANGUAGE SQL;`}, + Expected: []string{postgresAddFunctionDefinition}, + }) + }) + + t.Run("preserves functions with differing argument types", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{`CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL;`}, + Alter: []string{`CREATE FUNCTION add(integer, integer, integer) RETURNS integer AS 'SELECT $1 + $2 + $3;' LANGUAGE SQL;`}, + Expected: []string{`DROP FUNCTION IF EXISTS "public"."add"(int4, int4, int4);`}, + }) + }) + }) +} + +func TestDrift_Tables(t *testing.T) { + t.Run("create", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + ` + CREATE TABLE my_table ( + id INTEGER PRIMARY KEY, + name TEXT + ); + `, + }, + Alter: []string{ + `DROP TABLE my_table;`, + }, + Expected: []string{ + `CREATE TABLE IF NOT EXISTS "public"."my_table"();`, + `ALTER TABLE "public"."my_table" ADD COLUMN IF NOT EXISTS "id" integer NOT NULL;`, + `ALTER TABLE "public"."my_table" ADD COLUMN IF NOT EXISTS "name" text;`, + `ALTER TABLE "public"."my_table" ADD CONSTRAINT "my_table_pkey" PRIMARY KEY (id);`, + }, + }) + }) + + t.Run("drop", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{}, + Alter: []string{ + ` + CREATE TABLE my_table ( + id INTEGER PRIMARY KEY, + name TEXT + ); + `, + }, + Expected: []string{ + `ALTER TABLE "public"."my_table" DROP CONSTRAINT IF EXISTS "my_table_pkey";`, + `ALTER TABLE "public"."my_table" DROP COLUMN IF EXISTS "id";`, + `ALTER TABLE "public"."my_table" DROP COLUMN IF EXISTS "name";`, + `DROP TABLE IF EXISTS "public"."my_table";`, + }, + }) + }) + + t.Run("alter", func(t *testing.T) { + t.Run("columns", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + ` + CREATE TABLE my_table ( + id INTEGER PRIMARY KEY, + name TEXT + ); + `, + }, + Alter: []string{ + `ALTER TABLE my_table DROP COLUMN name;`, + }, + Expected: []string{ + `ALTER TABLE "public"."my_table" ADD COLUMN IF NOT EXISTS "name" text;`, + }, + }) + }) + + t.Run("extra", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + ` + CREATE TABLE my_table ( + id INTEGER 
PRIMARY KEY,
+						name TEXT
+					);
+					`,
+				},
+				Alter: []string{
+					`ALTER TABLE my_table ADD COLUMN age INTEGER;`,
+				},
+				Expected: []string{
+					`ALTER TABLE "public"."my_table" DROP COLUMN IF EXISTS "age";`,
+				},
+			})
+		})
+
+		t.Run("mismatched type", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE TABLE my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT
+					);
+					`,
+				},
+				Alter: []string{
+					`ALTER TABLE my_table ALTER COLUMN name TYPE VARCHAR(255);`,
+				},
+				Expected: []string{
+					`ALTER TABLE "public"."my_table" ALTER COLUMN "name" SET DATA TYPE text;`,
+				},
+			})
+		})
+
+		t.Run("mismatched default", func(t *testing.T) {
+			t.Run("add default", func(t *testing.T) {
+				testDrift(t, DriftTestCase{
+					Setup: []string{
+						`
+						CREATE TABLE my_table (
+							id INTEGER PRIMARY KEY,
+							name TEXT
+						);
+						`,
+					},
+					Alter: []string{
+						`ALTER TABLE my_table ALTER COLUMN name SET DEFAULT 'foo';`,
+					},
+					Expected: []string{
+						`ALTER TABLE "public"."my_table" ALTER COLUMN "name" DROP DEFAULT;`,
+					},
+				})
+			})
+
+			t.Run("drop default", func(t *testing.T) {
+				testDrift(t, DriftTestCase{
+					Setup: []string{
+						`
+						CREATE TABLE my_table (
+							id INTEGER PRIMARY KEY,
+							name TEXT DEFAULT 'foo'
+						);
+						`,
+					},
+					Alter: []string{
+						`ALTER TABLE my_table ALTER COLUMN name DROP DEFAULT;`,
+					},
+					Expected: []string{
+						`ALTER TABLE "public"."my_table" ALTER COLUMN "name" SET DEFAULT 'foo'::text;`,
+					},
+				})
+			})
+
+			t.Run("change default", func(t *testing.T) {
+				testDrift(t, DriftTestCase{
+					Setup: []string{
+						`
+						CREATE TABLE my_table (
+							id INTEGER PRIMARY KEY,
+							name TEXT DEFAULT 'foo'
+						);
+						`,
+					},
+					Alter: []string{
+						`ALTER TABLE my_table ALTER COLUMN name SET DEFAULT 'bar';`,
+					},
+					Expected: []string{
+						`ALTER TABLE "public"."my_table" ALTER COLUMN "name" SET DEFAULT 'foo'::text;`,
+					},
+				})
+			})
+		})
+
+		t.Run("mismatched nullability", func(t *testing.T) {
+			t.Run("add not null", func(t *testing.T) {
+				testDrift(t, DriftTestCase{
+					Setup: []string{
+						`
+						CREATE TABLE my_table (
+							id INTEGER PRIMARY KEY,
+							name TEXT
+						);
+						`,
+					},
+					Alter: []string{
+						`ALTER TABLE my_table ALTER COLUMN name SET NOT NULL;`,
+					},
+					Expected: []string{
+						`ALTER TABLE "public"."my_table" ALTER COLUMN "name" DROP NOT NULL;`,
+					},
+				})
+			})
+
+			t.Run("drop not null", func(t *testing.T) {
+				testDrift(t, DriftTestCase{
+					Setup: []string{
+						`
+						CREATE TABLE my_table (
+							id INTEGER PRIMARY KEY,
+							name TEXT NOT NULL
+						);
+						`,
+					},
+					Alter: []string{
+						`ALTER TABLE my_table ALTER COLUMN name DROP NOT NULL;`,
+					},
+					Expected: []string{
+						`ALTER TABLE "public"."my_table" ALTER COLUMN "name" SET NOT NULL;`,
+					},
+				})
+			})
+		})
+
+		t.Run("multiple changes", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE TABLE my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT DEFAULT 'foo'
+					);
+					`,
+				},
+				Alter: []string{
+					`ALTER TABLE my_table ALTER COLUMN name SET DEFAULT 'bar';`,
+					`ALTER TABLE my_table ALTER COLUMN name SET NOT NULL;`,
+				},
+				Expected: []string{
+					`ALTER TABLE "public"."my_table" ALTER COLUMN "name" SET DEFAULT 'foo'::text;`,
+					`ALTER TABLE "public"."my_table" ALTER COLUMN "name" DROP NOT NULL;`,
+				},
+			})
+		})
+	})
+
+	t.Run("constraint", func(t *testing.T) {
+		t.Run("missing", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE TABLE my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT UNIQUE
+					);
+					`,
+				},
+				Alter: []string{
+					`ALTER TABLE my_table DROP CONSTRAINT my_table_name_key;`,
+				},
+				Expected: []string{
+					`ALTER TABLE "public"."my_table" ADD CONSTRAINT "my_table_name_key" UNIQUE (name);`,
+				},
+			})
+		})
+
+		t.Run("extra", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE TABLE my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT
+					);
+					`,
+				},
+				Alter: []string{
+					`ALTER TABLE my_table ADD CONSTRAINT my_table_name_key UNIQUE (name);`,
+				},
+				Expected: []string{
+					`ALTER TABLE "public"."my_table" DROP CONSTRAINT IF EXISTS "my_table_name_key";`,
+				},
+			})
+		})
+
+		t.Run("alter", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE TABLE my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT
+					);
+					`,
+				},
+				Alter: []string{
+					`ALTER TABLE my_table DROP CONSTRAINT my_table_pkey;`,
+					`ALTER TABLE my_table ADD CONSTRAINT my_table_pkey UNIQUE (name);`,
+				},
+				Expected: []string{
+					`ALTER TABLE "public"."my_table" DROP CONSTRAINT IF EXISTS "my_table_pkey";`,
+					`ALTER TABLE "public"."my_table" ADD CONSTRAINT "my_table_pkey" PRIMARY KEY (id);`,
+				},
+			})
+		})
+	})
+
+	t.Run("index", func(t *testing.T) {
+		t.Run("missing", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE SCHEMA s;
+					CREATE TABLE s.my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT
+					);
+					`,
+					`CREATE INDEX my_table_name_idx ON s.my_table (name);`,
+				},
+				Alter: []string{
+					`DROP INDEX s.my_table_name_idx;`,
+				},
+				Expected: []string{
+					`CREATE INDEX my_table_name_idx ON s.my_table USING btree (name);`,
+				},
+			})
+		})
+
+		t.Run("extra", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE SCHEMA s;
+					CREATE TABLE s.my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT
+					);
+					`,
+				},
+				Alter: []string{
+					`CREATE INDEX my_table_name_idx ON s.my_table (name);`,
+				},
+				Expected: []string{
+					`DROP INDEX IF EXISTS "s"."my_table_name_idx";`,
+				},
+			})
+		})
+
+		t.Run("alter", func(t *testing.T) {
+			testDrift(t, DriftTestCase{
+				Setup: []string{
+					`
+					CREATE SCHEMA s;
+					CREATE TABLE s.my_table (
+						id INTEGER PRIMARY KEY,
+						name TEXT
+					);
+					`,
+					`CREATE INDEX my_table_name_idx ON s.my_table (name);`,
+				},
+				Alter: []string{
+					`DROP INDEX s.my_table_name_idx;`,
+					`CREATE INDEX my_table_name_idx ON s.my_table (name DESC);`,
+				},
+				Expected: []string{
+					`DROP INDEX IF EXISTS "s"."my_table_name_idx";`,
+					`CREATE INDEX my_table_name_idx ON s.my_table USING btree (name);`,
+				},
+			})
+		})
+	})
+})
+}
+
+func TestDrift_Sequences(t *testing.T) {
+	t.Run("create", func(t *testing.T) {
+		testDrift(t, DriftTestCase{
+			Setup: []string{`CREATE SEQUENCE my_seq AS bigint;`},
+			Alter: []string{`DROP SEQUENCE my_seq;`},
+			Expected: []string{`CREATE SEQUENCE IF NOT EXISTS "public"."my_seq" AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE;`},
+		})
+	})
+
+	t.Run("drop", func(t *testing.T) {
+		testDrift(t, DriftTestCase{
+			Setup: []string{},
+			Alter: []string{`CREATE SEQUENCE my_seq;`},
+			Expected: []string{`DROP SEQUENCE IF EXISTS "public"."my_seq";`},
+		})
+	})
+
+	t.Run("alter", func(t *testing.T) {
+		testDrift(t, DriftTestCase{
+			Setup: []string{
+				`CREATE SEQUENCE my_seq AS bigint INCREMENT BY 2 MINVALUE 2 MAXVALUE 12000 START WITH 2 CYCLE;`,
+			},
+			Alter: []string{
+				`DROP SEQUENCE my_seq;`,
+				`CREATE SEQUENCE my_seq AS int INCREMENT BY 1 MINVALUE 1 MAXVALUE 24000 START WITH 1 NO CYCLE;`,
+				`SELECT setval('my_seq', 43, true);`,
+			},
+			Expected: []string{
+				`ALTER SEQUENCE IF EXISTS "public"."my_seq" AS bigint INCREMENT BY 2 MINVALUE 2 MAXVALUE 12000 START WITH 2 CYCLE;`,
+			},
+		})
+	})
+}
+
+func TestDrift_Views(t *testing.T) {
+	t.Run("create", func(t
*testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{`CREATE VIEW my_view AS SELECT 1 AS one;`}, + Alter: []string{`DROP VIEW my_view;`}, + Expected: []string{`CREATE OR REPLACE VIEW "public"."my_view" AS SELECT 1 AS one;`}, + }) + }) + + t.Run("drop", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{}, + Alter: []string{`CREATE VIEW my_view AS SELECT 1 AS one;`}, + Expected: []string{`DROP VIEW IF EXISTS "public"."my_view";`}, + }) + }) + + t.Run("alter", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE VIEW my_view AS SELECT 1 AS one;`, + }, + Alter: []string{ + `DROP VIEW my_view;`, + `CREATE OR REPLACE VIEW my_view AS SELECT 2 AS two;`, + }, + Expected: []string{ + `DROP VIEW IF EXISTS "public"."my_view";`, + `CREATE OR REPLACE VIEW "public"."my_view" AS SELECT 1 AS one;`, + }, + }) + }) + + t.Run("dependency closure", func(t *testing.T) { + t.Run("create", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE TABLE t (x int);`, + `CREATE VIEW v_foo AS SELECT * FROM t;`, + `CREATE VIEW v_bar AS SELECT * FROM v_foo;`, + `CREATE VIEW v_baz AS SELECT * FROM t UNION SELECT * FROM v_foo;`, + `CREATE VIEW v_bonk AS SELECT * FROM t UNION SELECT * FROM v_bar;`, + `CREATE VIEW v_quux AS SELECT * FROM v_foo UNION SELECT * FROM v_bar;`, + `CREATE VIEW v_one AS SELECT 1 AS one;`, + `CREATE VIEW v_two AS SELECT 2 AS two;`, + }, + Alter: []string{ + `DROP VIEW v_two;`, + `DROP VIEW v_one;`, + `DROP VIEW v_quux;`, + `DROP VIEW v_bonk;`, + `DROP VIEW v_baz;`, + `DROP VIEW v_bar;`, + `DROP VIEW v_foo;`, + }, + Expected: []string{ + `CREATE OR REPLACE VIEW "public"."v_foo" AS SELECT x + FROM t;`, + `CREATE OR REPLACE VIEW "public"."v_bar" AS SELECT x + FROM v_foo;`, + `CREATE OR REPLACE VIEW "public"."v_baz" AS SELECT t.x + FROM t +UNION + SELECT v_foo.x + FROM v_foo;`, + `CREATE OR REPLACE VIEW "public"."v_bonk" AS SELECT t.x + FROM t +UNION + SELECT v_bar.x + FROM v_bar;`, + `CREATE OR REPLACE VIEW "public"."v_one" AS SELECT 1 AS one;`, + `CREATE OR REPLACE VIEW "public"."v_quux" AS SELECT v_foo.x + FROM v_foo +UNION + SELECT v_bar.x + FROM v_bar;`, + `CREATE OR REPLACE VIEW "public"."v_two" AS SELECT 2 AS two;`, + }, + }) + }) + + t.Run("drop", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE TABLE t (x int);`, + }, + Alter: []string{ + `CREATE VIEW v_foo AS SELECT * FROM t;`, + `CREATE VIEW v_bar AS SELECT * FROM v_foo;`, + `CREATE VIEW v_baz AS SELECT * FROM t UNION SELECT * FROM v_foo;`, + `CREATE VIEW v_bonk AS SELECT * FROM t UNION SELECT * FROM v_bar;`, + `CREATE VIEW v_quux AS SELECT * FROM v_foo UNION SELECT * FROM v_bar;`, + `CREATE VIEW v_one AS SELECT 1 AS one;`, + `CREATE VIEW v_two AS SELECT 2 AS two;`, + }, + Expected: []string{ + `DROP VIEW IF EXISTS "public"."v_baz";`, + `DROP VIEW IF EXISTS "public"."v_bonk";`, + `DROP VIEW IF EXISTS "public"."v_one";`, + `DROP VIEW IF EXISTS "public"."v_quux";`, + `DROP VIEW IF EXISTS "public"."v_bar";`, + `DROP VIEW IF EXISTS "public"."v_foo";`, + `DROP VIEW IF EXISTS "public"."v_two";`, + }, + }) + }) + }) +} + +func TestDrift_Triggers(t *testing.T) { + t.Run("create", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE SCHEMA a;`, + `CREATE SCHEMA b;`, + `CREATE TABLE a.t (x int);`, + + `CREATE FUNCTION b.f() RETURNS TRIGGER AS $$ + BEGIN RETURN NEW; END; + $$ LANGUAGE plpgsql;`, + + `CREATE TRIGGER "t-insert" BEFORE INSERT ON a.t FOR EACH ROW EXECUTE FUNCTION b.f();`, + }, + Alter: []string{ + `DROP 
TRIGGER "t-insert" ON a.t;`, + }, + Expected: []string{ + `CREATE TRIGGER "t-insert" BEFORE INSERT ON a.t FOR EACH ROW EXECUTE FUNCTION b.f();`, + }, + }) + }) + + t.Run("drop", func(t *testing.T) { + testDrift(t, DriftTestCase{ + Setup: []string{ + `CREATE SCHEMA a;`, + `CREATE SCHEMA b;`, + `CREATE TABLE a.t (x int);`, + `CREATE FUNCTION b.f() RETURNS TRIGGER AS $$ + BEGIN RETURN NEW; END; + $$ LANGUAGE plpgsql;`, + }, + Alter: []string{ + `CREATE TRIGGER "t-insert" BEFORE INSERT ON a.t FOR EACH ROW EXECUTE FUNCTION b.f();`, + }, + Expected: []string{ + `DROP TRIGGER IF EXISTS "t-insert" ON "a"."t";`, + }, + }) + }) +} + +// +// + +type DriftTestCase struct { + Setup []string + Alter []string + Expected []string +} + +func testDrift(t *testing.T, testCase DriftTestCase) { + t.Helper() + db := NewTestDB(t) + ctx := context.Background() + + // Execute all setup queries + for _, query := range testCase.Setup { + require.NoError(t, db.Exec(ctx, RawQuery(query)), "query=%q", query) + } + + // Describe the initial schema + before, err := DescribeSchema(ctx, db) + if err != nil { + t.Fatalf("Failed to describe schema: %v", err) + } + + // Execute all alter queries + for _, query := range testCase.Alter { + require.NoError(t, db.Exec(ctx, RawQuery(query)), "query=%q", query) + } + + // Describe the altered schema + after, err := DescribeSchema(ctx, db) + require.NoError(t, err) + + // Compare schemas and assert expected drift + require.Equal(t, testCase.Expected, Compare(before, after)) + + // Apply the expected repair queries + for _, query := range testCase.Expected { + require.NoError(t, db.Exec(ctx, RawQuery(query)), "query=%q", query) + } + + // Verify that the drift has been repaired + repaired, err := DescribeSchema(ctx, db) + require.NoError(t, err) + assert.Empty(t, Compare(before, repaired)) +} diff --git a/drift_triggers.go b/drift_triggers.go new file mode 100644 index 0000000..40ee6ef --- /dev/null +++ b/drift_triggers.go @@ -0,0 +1,33 @@ +package pgutil + +import "fmt" + +type TriggerModifier struct { + d TriggerDescription +} + +func NewTriggerModifier(_ SchemaDescription, d TriggerDescription) TriggerModifier { + return TriggerModifier{ + d: d, + } +} + +func (m TriggerModifier) Key() string { + return fmt.Sprintf("%q.%q", m.d.Namespace, m.d.Name) +} + +func (m TriggerModifier) ObjectType() string { + return "trigger" +} + +func (m TriggerModifier) Description() TriggerDescription { + return m.d +} + +func (m TriggerModifier) Create() string { + return fmt.Sprintf("%s;", m.d.Definition) +} + +func (m TriggerModifier) Drop() string { + return fmt.Sprintf("DROP TRIGGER IF EXISTS %q ON %q.%q;", m.d.Name, m.d.Namespace, m.d.TableName) +} diff --git a/drift_views.go b/drift_views.go new file mode 100644 index 0000000..e3b86a7 --- /dev/null +++ b/drift_views.go @@ -0,0 +1,53 @@ +package pgutil + +import ( + "fmt" + "strings" +) + +type ViewModifier struct { + d ViewDescription +} + +func NewViewModifier(_ SchemaDescription, d ViewDescription) ViewModifier { + return ViewModifier{ + d: d, + } +} + +func (m ViewModifier) Key() string { + return fmt.Sprintf("%q.%q", m.d.Namespace, m.d.Name) +} + +func (m ViewModifier) ObjectType() string { + return "view" +} + +func (m ViewModifier) Description() ViewDescription { + return m.d +} + +func (m ViewModifier) Create() string { + return fmt.Sprintf("CREATE OR REPLACE VIEW %s AS %s", m.Key(), strings.TrimSpace(stripIdent(" "+m.d.Definition))) +} + +func (m ViewModifier) Drop() string { + return fmt.Sprintf("DROP VIEW IF EXISTS %s;", m.Key()) +} 
+ +func stripIdent(s string) string { + lines := strings.Split(strings.TrimRight(s, "\n"), "\n") + + min := len(lines[0]) + for _, line := range lines { + if ident := len(line) - len(strings.TrimLeft(line, " ")); ident < min { + min = ident + } + } + + for i, line := range lines { + lines[i] = line[min:] + } + + return strings.Join(lines, "\n") +} diff --git a/drift_views_test.go b/drift_views_test.go new file mode 100644 index 0000000..1568d62 --- /dev/null +++ b/drift_views_test.go @@ -0,0 +1,40 @@ +package pgutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStripIdent(t *testing.T) { + for _, testCase := range []struct { + name string + input string + expected string + }{ + { + name: "single line", + input: "CREATE VIEW IF NOT EXISTS v AS SELECT 1;", + expected: "CREATE VIEW IF NOT EXISTS v AS SELECT 1;", + }, + { + name: "single line with ident", + input: " CREATE VIEW IF NOT EXISTS v AS SELECT 1;", + expected: "CREATE VIEW IF NOT EXISTS v AS SELECT 1;", + }, + { + name: "multi line, common indent", + input: " CREATE VIEW IF NOT EXISTS v AS\n SELECT 1;", + expected: "CREATE VIEW IF NOT EXISTS v AS\nSELECT 1;", + }, + { + name: "multi line, jagged indent", + input: " CREATE VIEW IF NOT EXISTS v AS\n SELECT *\n FROM t;", + expected: "CREATE VIEW IF NOT EXISTS v AS\n SELECT *\n FROM t;", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + assert.Equal(t, testCase.expected, stripIdent(testCase.input)) + }) + } +} diff --git a/go.mod b/go.mod index 5ddb16b..55b53dc 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,53 @@ module github.com/go-nacelle/pgutil -go 1.13 +go 1.21 require ( - github.com/aphistic/sweet v0.3.0 - github.com/aphistic/sweet-junit v0.2.0 - github.com/go-nacelle/nacelle v1.1.0 - github.com/golang-migrate/migrate/v4 v4.7.0 - github.com/jmoiron/sqlx v1.2.0 - github.com/lib/pq v1.2.0 - github.com/onsi/gomega v1.7.1 + github.com/fatih/color v1.17.0 + github.com/go-nacelle/config/v3 v3.0.0 + github.com/go-nacelle/log/v2 v2.0.1 + github.com/go-nacelle/nacelle/v2 v2.1.1 + github.com/hexops/autogold/v2 v2.2.1 + github.com/jackc/pgconn v1.14.1 + github.com/lib/pq v1.10.9 + github.com/segmentio/fasthash v1.0.3 + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.9.0 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/sync v0.8.0 +) + +require ( + github.com/BurntSushi/toml v1.4.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/derision-test/glock v1.1.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-nacelle/process/v2 v2.1.0 // indirect + github.com/go-nacelle/service/v2 v2.0.1 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/hexops/autogold v1.3.1 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/hexops/valast v1.4.4 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-zglob v0.0.6 // indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/nightlyone/lockfile v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + 
github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.14.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + mvdan.cc/gofumpt v0.5.0 // indirect ) diff --git a/go.sum b/go.sum index 03403e4..2c98595 100644 --- a/go.sum +++ b/go.sum @@ -1,389 +1,346 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a h1:2KLQMJ8msqoPHIPDufkxVcoTtcmE5+1sL9950m4R9Pk= -github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= -github.com/aphistic/gomol v0.0.0-20190314031446-1546845ba714 h1:ml3df+ybkktxzxTLInLXEDqfoFQUMC8kQtdfv8iwI+M= -github.com/aphistic/gomol v0.0.0-20190314031446-1546845ba714/go.mod h1:/wJ/Wijq31ktyhrvSuqh8KPiPEtJLKU/T4KwxmBYk2w= -github.com/aphistic/gomol-console v0.0.0-20180111152223-9fa1742697a8 h1:tzgowv45TOFALtZLJ9y3k+krzOh2J8IkCvJ8T//6VAU= -github.com/aphistic/gomol-console v0.0.0-20180111152223-9fa1742697a8/go.mod h1:3w1309L1wdWg0BwrcOnhJA06I0lm7G7wIhjGmqig4DM= -github.com/aphistic/gomol-gelf v0.0.0-20170516042314-573e82a82082 h1:PgPqI/JnStmzwTof+PtT53Pz53dlrz2BmF7cn5CAwQM= -github.com/aphistic/gomol-gelf v0.0.0-20170516042314-573e82a82082/go.mod h1:jaNu5/0CyDa/8+Y5rqU7H3+wX9cbAAuJtEU89/XLDoc= -github.com/aphistic/gomol-json v1.1.0 h1:XJWwW8PxYOHf0f0FquuBWcgvZBvQ89nPxZsqQ9pfpro= -github.com/aphistic/gomol-json v1.1.0/go.mod h1:wEOdY9oByrlQ4KEXY2wY3GvCWKoyIg7WeChslvrTkik= -github.com/aphistic/sweet v0.0.0-20180618201346-68e18ab55a67/go.mod h1:iggGz3Cujwru5rGKuOi4u1rfI+38suzhVVJj8Ey7Q3M= -github.com/aphistic/sweet v0.2.0 h1:I4z+fAUqvKfvZV/CHi5dV0QuwbmIvYYFDjG0Ss5QpAs= -github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= -github.com/aphistic/sweet v0.3.0 
h1:xZTMfCoMsjWubPNxOBODluBC4qfGP0CdRJ88jon46XE= -github.com/aphistic/sweet v0.3.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= -github.com/aphistic/sweet-junit v0.0.0-20171005212431-6b78f7014f7c/go.mod h1:+rEpaBMG7nKCTS5rjybTdJwqNG0ayGoPUm+sCPBgi9Y= -github.com/aphistic/sweet-junit v0.0.0-20190314030539-8d7e248096c2 h1:qDCG/a4+mCcRqj+QHTc1RNncar6rpg0oGz9ynH4IRME= -github.com/aphistic/sweet-junit v0.0.0-20190314030539-8d7e248096c2/go.mod h1:+eL69RqmiKF2Jm3poefxF/ZyVNGXFdSsPq3ScBFtX9s= -github.com/aphistic/sweet-junit v0.2.0 h1:f3+QqXgIddHGW+wf9GAqvTTZFp1jwfWvttqcB2N6sKg= -github.com/aphistic/sweet-junit v0.2.0/go.mod h1:m5//tucV/gmAgd5yJaWscEpwxjuohGwhM/kzXtrGY5k= -github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/containerd/containerd v1.2.7 h1:8lqLbl7u1j3MmiL9cJ/O275crSq7bfwUayvvatEupQk= -github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= -github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= -github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= -github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= -github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= -github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= -github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= -github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= -github.com/dave/jennifer v1.3.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= 
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= -github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU= -github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 h1:mzrx39dGtGq0VEnTHjnakmczd4uFbhx2cZU3BJDsLdc= -github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/derision-test/glock v0.0.0-20210316032053-f5b74334bb29/go.mod h1:jKtLdBMrF+XQatqvg46wiWdDfDSSDjdhO4dOM2FX9H4= +github.com/derision-test/glock v1.0.0/go.mod h1:jKtLdBMrF+XQatqvg46wiWdDfDSSDjdhO4dOM2FX9H4= +github.com/derision-test/glock v1.1.0 h1:xCRXft0A2i7HDt0zmwRLep4abI3yq9XFeyuLX//QqN4= +github.com/derision-test/glock v1.1.0/go.mod h1:b64lRouYlknys/hp3/+m9FcbNZWG3K2j7OgJawsY4VA= +github.com/derision-test/go-mockgen v0.0.0-20201001011750-eb2233de6342 h1:EyNcBkaw2jkJxYuCN+Aa1fTOohkjTRK9Ltapi1SWcFk= +github.com/derision-test/go-mockgen v0.0.0-20201001011750-eb2233de6342/go.mod h1:FGP9Qq+gWtYK9ine/2Bzeww8SXS7mD1wypNl1Q3AKN0= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/efritz/backoff v0.0.0-20181228195520-96f666d52d44 h1:/1ZYwIsQO3XHzvQia/XxSeJ+3ddMxyFOP3CpT2WQPXQ= -github.com/efritz/backoff v0.0.0-20181228195520-96f666d52d44/go.mod h1:L7a/1pfrfOzpf5i9MEQTeiW9ZdRUcYMfK4QHud9+OSA= -github.com/efritz/backoff v1.0.0 h1:r1DfNhA1J7p8kZ185J/hLPz2Bl5ezTicUr9KamEAOYw= -github.com/efritz/backoff v1.0.0/go.mod h1:/tKomesOo7ekklUHEHxBbzNpjyBiOoiDCif3AcO+OIU= -github.com/efritz/glock v0.0.0-20180604185841-7e95e8b27a61 h1:q6eqGBPguNW4xPham18pvfzRQPyAjPAUmtIyAXx7cSM= -github.com/efritz/glock v0.0.0-20180604185841-7e95e8b27a61/go.mod h1:cpd5N5da92h+gt4FqUvF4jqMdXyVRM86t6Rl99W6xR0= -github.com/efritz/glock v0.0.0-20181228234553-f184d69dff2c h1:Q3HKbZogL9GGZVdO3PiVCOxZmRCsQAgV1xfelXJF/dY= -github.com/efritz/glock 
v0.0.0-20181228234553-f184d69dff2c/go.mod h1:4behwg5YZ7amYrI5VDO/1s68YXZQHklcyFQpVDDgB2w= -github.com/efritz/go-genlib v0.0.0-20190429143346-e1e478a98211/go.mod h1:zUniQY7pV7ciqIjXY8TyDHXaqHZJa3NQIiiYDycogYk= -github.com/efritz/go-mockgen v0.0.0-20190613153341-3425cf558834 h1:37JFXKsLttHkcvFFr4t68C21uInpCNAIvPeE7IYRXKA= -github.com/efritz/go-mockgen v0.0.0-20190613153341-3425cf558834/go.mod h1:2Ia+C4l9Ns2t8XwO8JVbJ6GUEL/Bwm4u1DAj7ApQdqE= -github.com/efritz/watchdog v0.0.0-20181228234521-84cf7cb74656 h1:xtclV2XiE/m2kwsD77SExHvN73urFKqAd0f0Ovl78bA= -github.com/efritz/watchdog v0.0.0-20181228234521-84cf7cb74656/go.mod h1:5dIiUx6OFLQeNCjiOSY3ylcJC2yaUxzecH9Ot3ADtn4= -github.com/fatih/structtag v1.0.0 h1:pTHj65+u3RKWYPSGaU290FpI/dXxTaHdVwVwbcPKmEc= -github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA= -github.com/fatih/structtag v1.1.0 h1:6j4mUV/ES2duvnAzKMFkN6/A5mCaNYPD3xfbAkLLOF8= -github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-nacelle/config v1.0.0 h1:A7ouv042iLlWV1NPldTp8gLSO1SG9J8+8fmB1W2xFcQ= -github.com/go-nacelle/config v1.0.0/go.mod h1:oTfdaiTgNqiEUu0oXsEBxTlO66FtSXNAKbZXsE4BmY0= -github.com/go-nacelle/config v1.1.0 h1:PYWhFef60/ryC+rda9q4V640fx2Y6YDphznWVkA79xg= -github.com/go-nacelle/config v1.1.0/go.mod h1:dMu+mFkZhuyvQszbhNfhBlu1DSU+thCsCyeUycSGfk8= -github.com/go-nacelle/log v1.0.0 h1:1PdYEw6a5GnRQZdWqDsJmXyARbKru3XC2+Rzxo5GbnU= -github.com/go-nacelle/log v1.0.0/go.mod h1:2bN7vfOSvHTzHWxiTYz+F8/MvustIt/HmARQt67gXz4= -github.com/go-nacelle/log v1.0.1 h1:OC9MXbFYlP5rqfsBPcz18oN3GtDk7jSJ17+obsVYSq0= -github.com/go-nacelle/log v1.0.1/go.mod h1:2bN7vfOSvHTzHWxiTYz+F8/MvustIt/HmARQt67gXz4= -github.com/go-nacelle/log v1.1.0 h1:LOdo2GYWhZUW9hAsrseoRDEI8vfh75Gf1jquMhwS1ro= -github.com/go-nacelle/log v1.1.0/go.mod h1:QgxuTSxqEPzHqL66tpY2j8t/s4dDOwszQJL2hMKyzdI= -github.com/go-nacelle/nacelle v1.0.0 h1:MOLZ2uVtA+CLAFHvdupsfmdw+A0ydfoe2gq62+PgI4g= -github.com/go-nacelle/nacelle v1.0.0/go.mod h1:N9nK+iIQgbztRao+Lu1uFyiEcw2RscUQUNa8QtmZUGk= -github.com/go-nacelle/nacelle v1.1.0 h1:A0bwcR1Zzz3sbgWPU9yJ+yu9h9i2+yjiShdKqwq2je8= 
-github.com/go-nacelle/nacelle v1.1.0/go.mod h1:/J8GEHaKCm9lfo7FaajS4reCF3feqj0ncwhB+wIJCO0= -github.com/go-nacelle/process v1.0.0 h1:2582kC2Exy7M+x8Xgp9EXx6CZ9Ugw1lESFgVp+YI3n8= -github.com/go-nacelle/process v1.0.0/go.mod h1:rdCy03/mUU2X8EBAbSVRH13QvNpto5ir372I9YTRu1M= -github.com/go-nacelle/service v1.0.0 h1:isr7QDxRxGxaAoAmXupOEGf6D7v9wX6odmPRnPyh1f8= -github.com/go-nacelle/service v1.0.0/go.mod h1:JHLisFWKRjLlGdg17Jrm+rtUEXdDnPtT7gZcrRhGrMA= -github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-nacelle/config/v3 v3.0.0 h1:YseTUtwKC21rgWNdBh+f0A5ANnh7kSMqGkB2JHI0aJ8= +github.com/go-nacelle/config/v3 v3.0.0/go.mod h1:cj+WGluCHOR/5gup6dx3wunXsEGEbxmCiB6cYa9iNYQ= +github.com/go-nacelle/log/v2 v2.0.1 h1:vOkiKz/pZannZIpH2yDV03hUKbtr6uLvrWklk8N6eR0= +github.com/go-nacelle/log/v2 v2.0.1/go.mod h1:KpBIkm+Rc9qzdwUKQlhxbN0slXSKi/FhDWY5oiFVhY8= +github.com/go-nacelle/nacelle/v2 v2.1.1 h1:rHPbzqOGXThSZm9xF301ywJdHmL8QXaDANcmefyrqfM= +github.com/go-nacelle/nacelle/v2 v2.1.1/go.mod h1:j+rbDQaYWQcPwiZiTvPdb5/YzM3BCnUIZJfkxXhb69s= +github.com/go-nacelle/process/v2 v2.1.0 h1:T7cN5KUhhDJJ4qd38V7LmuQLKTvmUAK4ZXEHrL8MHeM= +github.com/go-nacelle/process/v2 v2.1.0/go.mod h1:rqzTvXR+gH0/f+7yBrxbVs8ZKL7AhYzNCxWvHIJF4Mk= +github.com/go-nacelle/service/v2 v2.0.1 h1:Pnxbqnpv2w0ntzYIMkRwCxoYbvvu35r9b4hTVrt2ZSE= +github.com/go-nacelle/service/v2 v2.0.1/go.mod h1:o5K7mx7JaIxDbyfzFGH+JkRlTXUudjyx9Fx+AGHuk6I= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang-migrate/migrate v3.5.4+incompatible h1:R7OzwvCJTCgwapPCiX6DyBiu2czIUMDCB118gFTKTUA= -github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk= -github.com/golang-migrate/migrate/v4 v4.7.0 h1:gONcHxHApDTKXDyLH/H97gEHmpu1zcnnbAaq2zgrPrs= -github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/hexops/autogold v0.8.1/go.mod h1:97HLDXyG23akzAoRYJh/2OBs3kd80eHyKPvZw0S5ZBY= +github.com/hexops/autogold v1.3.1 h1:YgxF9OHWbEIUjhDbpnLhgVsjUDsiHDTyDfy2lrfdlzo= +github.com/hexops/autogold v1.3.1/go.mod 
h1:sQO+mQUCVfxOKPht+ipDSkJ2SCJ7BNJVHZexsXqWMx4= +github.com/hexops/autogold/v2 v2.2.1 h1:JPUXuZQGkcQMv7eeDXuNMovjfoRYaa0yVcm+F3voaGY= +github.com/hexops/autogold/v2 v2.2.1/go.mod h1:IJwxtUfj1BGLm0YsR/k+dIxYi6xbeLjqGke2bzcOTMI= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hexops/valast v1.4.3/go.mod h1:Iqx2kLj3Jn47wuXpj3wX40xn6F93QNFBHuiKBerkTGA= +github.com/hexops/valast v1.4.4 h1:rETyycw+/L2ZVJHHNxEBgh8KUn+87WugH9MxcEv9PGs= +github.com/hexops/valast v1.4.4/go.mod h1:Jcy1pNH7LNraVaAZDLyv21hHg2WBv9Nf9FL6fGxU7o4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4= +github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock 
v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= 
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
-github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
-github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
 github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
-github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-zglob v0.0.1 h1:xsEx/XUoVlI6yXjqBK062zYhRTZltCNmYPx6v+8DNaY=
-github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-zglob v0.0.3/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
+github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
+github.com/mattn/go-zglob v0.0.6 h1:mP8RnmCgho4oaUYDIDn6GNxYk+qJGUs8fJLn+twYj2A=
+github.com/mattn/go-zglob v0.0.6/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
+github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
+github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
-github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
+github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA=
+github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM=
+github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY=
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
-go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56 h1:ZpKuNIejY8P0ExLOVyKhb0WsgG8UdvHXe6TWjY7eL6k=
-golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190429094411-2cc0cad0ac78/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e h1:D5TXcfTk7xF7hvieo4QErS3qqCB4teTffacDWr7CI+0=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191119195528-f068ffe820e4 h1:FjhQftcbpdYXneEYSWZO7+6Bu+Bi1A8VPvGYWOIzIbw=
-golang.org/x/sys v0.0.0-20191119195528-f068ffe820e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210531080801-fdfd190a6549/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190128232029-0a99049195af/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190428024724-550556f78a90/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200916225323-c537a342ddf6/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ=
+mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
+mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
diff --git a/initializer.go b/initializer.go
index cf65b19..9b62fbb 100644
--- a/initializer.go
+++ b/initializer.go
@@ -1,25 +1,23 @@
 package pgutil
 
 import (
-    "github.com/go-nacelle/nacelle"
-    "github.com/golang-migrate/migrate/v4/source"
+    "github.com/go-nacelle/nacelle/v2"
     _ "github.com/lib/pq"
 )
 
 type Initializer struct {
-    Logger       nacelle.Logger           `service:"logger"`
-    Services     nacelle.ServiceContainer `service:"services"`
-    sourceDriver source.Driver
+    Logger   nacelle.Logger           `service:"logger"`
+    Services nacelle.ServiceContainer `service:"services"`
 }
 
 const ServiceName = "db"
 
 func NewInitializer(configs ...ConfigFunc) *Initializer {
+    // For expansion
     options := getOptions(configs)
+    _ = options
 
-    return &Initializer{
-        sourceDriver: options.sourceDriver,
-    }
+    return &Initializer{}
 }
 
 func (i *Initializer) Init(config nacelle.Config) error {
@@ -38,16 +36,5 @@ func (i *Initializer) Init(config nacelle.Config) error {
         return err
     }
 
-    if err := runMigrations(
-        db.DB.DB,
-        i.sourceDriver,
-        i.Logger,
-        dbConfig.MigrationsTable,
-        dbConfig.MigrationsSchemaName,
-        dbConfig.FailOnNewerMigrationVersion,
-    ); err != nil {
-        return err
-    }
-
     return i.Services.Set(ServiceName, db)
 }
diff --git a/initializer_options.go b/initializer_options.go
new file mode 100644
index 0000000..db7d8c1
--- /dev/null
+++ b/initializer_options.go
@@ -0,0 +1,19 @@
+package pgutil
+
+type (
+    options struct {
+        // For expansion
+    }
+
+    // ConfigFunc is a function used to configure an initializer.
+    ConfigFunc func(*options)
+)
+
+func getOptions(configs []ConfigFunc) *options {
+    options := &options{}
+    for _, f := range configs {
+        f(options)
+    }
+
+    return options
+}
diff --git a/locker.go b/locker.go
new file mode 100644
index 0000000..84d992a
--- /dev/null
+++ b/locker.go
@@ -0,0 +1,63 @@
+package pgutil
+
+import (
+    "context"
+    "errors"
+    "math"
+
+    "github.com/segmentio/fasthash/fnv1"
+)
+
+type TransactionalLocker struct {
+    db        DB
+    namespace int32
+}
+
+var ErrInTransaction = errors.New("locker database must not be in transaction")
+
+func StringKey(key string) int32 {
+    return int32(fnv1.HashString32(key) % math.MaxInt32)
+}
+
+func NewTransactionalLocker(db DB, namespace int32) (*TransactionalLocker, error) {
+    if db.IsInTransaction() {
+        return nil, ErrInTransaction
+    }
+
+    locker := &TransactionalLocker{
+        db:        db,
+        namespace: namespace,
+    }
+
+    return locker, nil
+}
+
+func (l *TransactionalLocker) WithLock(ctx context.Context, key int32, f func(tx DB) error) error {
+    return l.db.WithTransaction(ctx, func(tx DB) error {
+        if err := tx.Exec(ctx, Query("SELECT pg_advisory_xact_lock({:namespace}, {:key})", Args{
+            "namespace": l.namespace,
+            "key":       key,
+        })); err != nil {
+            return err
+        }
+
+        return f(tx)
+    })
+}
+
+func (l *TransactionalLocker) TryWithLock(ctx context.Context, key int32, f func(tx DB) error) (acquired bool, _ error) {
+    err := l.db.WithTransaction(ctx, func(tx DB) (err error) {
+        if acquired, _, err = ScanBool(tx.Query(ctx, Query("SELECT pg_try_advisory_xact_lock({:namespace}, {:key})", Args{
+            "namespace": l.namespace,
+            "key":       key,
+        }))); err != nil {
+            return err
+        } else if !acquired {
+            return nil
+        }
+
+        return f(tx)
+    })
+
+    return acquired, err
+}
diff --git a/locker_test.go b/locker_test.go
new file mode 100644
index 0000000..cf97e1b
--- /dev/null
+++ b/locker_test.go
@@ -0,0 +1,82 @@
+package pgutil
+
+import (
+    "context"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestLocker(t *testing.T) {
+    var (
+        db  = NewTestDB(t)
+        ctx = context.Background()
+    )
+
+    locker, err := NewTransactionalLocker(db, StringKey("test"))
+    require.NoError(t, err)
+
+    t.Run("sequential", func(t *testing.T) {
+        require.NoError(t, locker.WithLock(ctx, 125, func(tx DB) error {
+            return nil
+        }))
+
+        require.NoError(t, locker.WithLock(ctx, 125, func(tx DB) error {
+            return nil
+        }))
+    })
+
+    t.Run("concurrent", func(t *testing.T) {
+        runWithHeldLock := func(f func()) {
+            var (
+                signal = make(chan struct{}) // closed when key=125 is acquired
+                block  = make(chan struct{}) // closed when key=125 should be released
+                errors = make(chan error, 1) // holds acquisition error from goroutine
+            )
+
+            go func() {
+                defer close(errors)
+
+                if err := locker.WithLock(ctx, 125, func(tx DB) error {
+                    close(signal)
+                    <-block
+                    return nil
+                }); err != nil {
+                    errors <- err
+                }
+            }()
+
+            <-signal     // Wait for key=125 to be acquired by goroutine above
+            f()          // Run test function with held lock
+            close(block) // Unblock test routine
+
+            for err := range errors {
+                require.NoError(t, err)
+            }
+        }
+
+        runWithHeldLock(func() {
+            // Test acquisition of concurrently held lock
+            acquired, err := locker.TryWithLock(ctx, 125, func(tx DB) error {
+                return nil
+            })
+            require.NoError(t, err)
+            assert.False(t, acquired)
+
+            // Test acquisition of concurrently un-held lock
+            acquired, err = locker.TryWithLock(ctx, 126, func(tx DB) error {
+                return nil
+            })
+            require.NoError(t, err)
+            assert.True(t, acquired)
+        })
+
+        // Test acquisition of released lock
+        acquired, err := locker.TryWithLock(ctx, 125, func(tx DB) error {
+            return nil
+        })
+        require.NoError(t, err)
+        assert.True(t, acquired)
+    })
+}
diff --git a/migration.go b/migration.go
deleted file mode 100644
index 8708e25..0000000
--- a/migration.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pgutil
-
-import (
-    "database/sql"
-    "os"
-    "strings"
-
-    "github.com/go-nacelle/nacelle"
-    migrate "github.com/golang-migrate/migrate/v4"
-    "github.com/golang-migrate/migrate/v4/database/postgres"
-    "github.com/golang-migrate/migrate/v4/source"
-)
-
-type migrationLogger struct {
-    Logger nacelle.Logger
-}
-
-func (m *migrationLogger) Printf(format string, v ...interface{}) {
-    m.Logger.Debug("migrate: "+strings.TrimSpace(format), v...)
-}
-
-func (m *migrationLogger) Verbose() bool {
-    return false
-}
-
-func runMigrations(
-    db *sql.DB,
-    sourceDriver source.Driver,
-    logger nacelle.Logger,
-    migrationsTable string,
-    schemaName string,
-    failOnNewerMigrationVersion bool,
-) error {
-    if sourceDriver == nil {
-        return nil
-    }
-
-    logger.Info("Running migrations")
-
-    databaseDriver, err := postgres.WithInstance(db, &postgres.Config{
-        MigrationsTable: migrationsTable,
-        SchemaName:      schemaName,
-    })
-    if err != nil {
-        return err
-    }
-
-    m, err := migrate.NewWithInstance("pgutil-source", sourceDriver, "postgres", databaseDriver)
-    if err != nil {
-        return err
-    }
-
-    m.Log = &migrationLogger{logger}
-
-    if err := m.Up(); err != nil && err != migrate.ErrNoChange {
-        // migrate returns not-exists errors when the database version is newer
-        // than the target version - this can happen during rolling restarts when
-        // an old version of a process starts after a newer one has become active
-        // for the first time. This should be generally harmless when following
-        // best practices, but we'll give an escape-hatch to kill the older
-        // processes in such an event.
-
-        if !os.IsNotExist(err) || failOnNewerMigrationVersion {
-            return err
-        }
-
-        version, _, err := m.Version()
-        if err != nil {
-            return err
-        }
-
-        logger.Warning("Current database schema is on a future version %s", version)
-        return nil
-    }
-
-    logger.Info("Database schema is up to date")
-    return nil
-}
diff --git a/migration_reader.go b/migration_reader.go
new file mode 100644
index 0000000..57ba48a
--- /dev/null
+++ b/migration_reader.go
@@ -0,0 +1,115 @@
+package pgutil
+
+import (
+    "fmt"
+    "regexp"
+    "strings"
+)
+
+type Definition struct {
+    ID            int
+    Name          string
+    UpQuery       Q
+    DownQuery     Q
+    IndexMetadata *IndexMetadata
+}
+
+type IndexMetadata struct {
+    TableName string
+    IndexName string
+}
+
+type MigrationReader interface {
+    ReadAll() ([]RawDefinition, error)
+}
+
+type MigrationReaderFunc func() ([]RawDefinition, error)
+
+func (f MigrationReaderFunc) ReadAll() ([]RawDefinition, error) {
+    return f()
+}
+
+type RawDefinition struct {
+    ID           int
+    Name         string
+    RawUpQuery   string
+    RawDownQuery string
+}
+
+var (
+    keyword = func(pattern string) string { return phrase(pattern) }
+    phrase  = func(patterns ...string) string { return strings.Join(patterns, `\s+`) + `\s+` }
+    opt     = func(pattern string) string { return `(?:` + pattern + `)?` }
+
+    capturedIdentifierPattern          = `([a-zA-Z0-9$_]+|"(?:[^"]+)")`
+    createIndexConcurrentlyPatternHead = strings.Join([]string{
+        keyword(`CREATE`),
+        opt(keyword(`UNIQUE`)),
+        keyword(`INDEX`),
+        opt(keyword(`CONCURRENTLY`)),
+        opt(phrase(`IF`, `NOT`, `EXISTS`)),
+        capturedIdentifierPattern, // capture index name
+        `\s+`,
+        keyword(`ON`),
+        opt(keyword(`ONLY`)),
+        capturedIdentifierPattern, // capture table name
+    }, ``)
+
+    createIndexConcurrentlyPattern    = regexp.MustCompile(createIndexConcurrentlyPatternHead)
+    createIndexConcurrentlyPatternAll = regexp.MustCompile(createIndexConcurrentlyPatternHead + "[^;]+;")
+)
+
+func ReadMigrations(reader MigrationReader) (definitions []Definition, _ error) {
+    rawDefinitions, err := reader.ReadAll()
+    if err != nil {
+        return nil, err
+    }
+
+    ids := map[int]struct{}{}
+    for _, rawDefinition := range rawDefinitions {
+        if _, ok := ids[rawDefinition.ID]; ok {
+            return nil, fmt.Errorf("duplicate migration identifier %d", rawDefinition.ID)
+        }
+        ids[rawDefinition.ID] = struct{}{}
+
+        var indexMetadata *IndexMetadata
+        prunedUp := removeComments(rawDefinition.RawUpQuery)
+        prunedDown := removeComments(rawDefinition.RawDownQuery)
+
+        if matches := createIndexConcurrentlyPattern.FindStringSubmatch(prunedUp); len(matches) > 0 {
+            if strings.TrimSpace(createIndexConcurrentlyPatternAll.ReplaceAllString(prunedUp, "")) != "" {
+                return nil, fmt.Errorf(`"create index concurrently" is not the only statement in the up migration`)
+            }
+
+            indexMetadata = &IndexMetadata{
+                TableName: matches[2],
+                IndexName: matches[1],
+            }
+        }
+
+        if len(createIndexConcurrentlyPattern.FindAllString(prunedDown, 1)) > 0 {
+            return nil, fmt.Errorf(`"create index concurrently" is not allowed in down migrations`)
+        }
+
+        definitions = append(definitions, Definition{
+            ID:            rawDefinition.ID,
+            Name:          rawDefinition.Name,
+            UpQuery:       RawQuery(rawDefinition.RawUpQuery),
+            DownQuery:     RawQuery(rawDefinition.RawDownQuery),
+            IndexMetadata: indexMetadata,
+        })
+    }
+
+    return definitions, nil
+}
+
+func removeComments(query string) string {
+    var filtered []string
+    for _, line := range strings.Split(query, "\n") {
+        if line := strings.TrimSpace(strings.Split(line, "--")[0]); line != "" {
+            filtered = append(filtered, line)
+        }
+    }
+
+    return strings.TrimSpace(strings.Join(filtered, "\n"))
+}
diff --git a/migration_reader_embed.go b/migration_reader_embed.go
new file mode 100644
index 0000000..ed81cc1
--- /dev/null
+++ b/migration_reader_embed.go
@@ -0,0 +1,7 @@
+package pgutil
+
+import "embed"
+
+func NewEmbedMigrationReader(fs embed.FS) MigrationReader {
+    return newFilesystemMigrationReader("", fs)
+}
diff --git a/migration_reader_filesystem.go b/migration_reader_filesystem.go
new file mode 100644
index 0000000..40987f8
--- /dev/null
+++ b/migration_reader_filesystem.go
@@ -0,0 +1,105 @@
+package pgutil
+
+import (
+    "fmt"
+    "io"
+    "io/fs"
+    "net/http"
+    "os"
+    "path"
+    "sort"
+    "strconv"
+    "strings"
+)
+
+type FilesystemMigrationReader struct {
+    name string
+    fs   fs.FS
+}
+
+func NewFilesystemMigrationReader(dirname string) MigrationReader {
+    return newFilesystemMigrationReader(dirname, os.DirFS(dirname))
+}
+
+func newFilesystemMigrationReader(name string, fs fs.FS) MigrationReader {
+    return &FilesystemMigrationReader{
+        name: name,
+        fs:   fs,
+    }
+}
+
+func (r *FilesystemMigrationReader) ReadAll() (definitions []RawDefinition, _ error) {
+    root, err := http.FS(r.fs).Open("/")
+    if err != nil {
+        if os.IsNotExist(err) {
+            return nil, fmt.Errorf("migration directory %q does not exist", r.name)
+        }
+
+        return nil, err
+    }
+    defer root.Close()
+
+    entries, err := root.Readdir(0)
+    if err != nil {
+        return nil, err
+    }
+
+    for _, entry := range entries {
+        if entry.IsDir() {
+            if definition, ok, err := r.readDefinition(entry.Name()); err != nil {
+                return nil, err
+            } else if ok {
+                definitions = append(definitions, definition)
+            }
+        }
+    }
+
+    sort.Slice(definitions, func(i, j int) bool { return definitions[i].ID < definitions[j].ID })
+    return definitions, nil
+}
+
+func (r *FilesystemMigrationReader) readDefinition(dirname string) (RawDefinition, bool, error) {
+    upPath := path.Join(dirname, "up.sql")
+    downPath := path.Join(dirname, "down.sql")
+
+    upFileContents, upErr := readFile(r.fs, upPath)
+    downFileContents, downErr := readFile(r.fs, downPath)
+    if os.IsNotExist(upErr) && os.IsNotExist(downErr) {
+        return RawDefinition{}, false, nil
+    } else if upErr != nil {
+        return RawDefinition{}, false, upErr
+    } else if downErr != nil {
+        return RawDefinition{}, false, downErr
+    }
+
+    nameParts := strings.SplitN(dirname, "_", 2)
+    id, err := strconv.Atoi(nameParts[0])
+    if err != nil {
+        return RawDefinition{}, false, err
+    }
+    name := strings.Replace(nameParts[1], "_", " ", -1)
+
+    definition := RawDefinition{
+        ID:           id,
+        Name:         name,
+        RawUpQuery:   string(upFileContents),
+        RawDownQuery: string(downFileContents),
+    }
+
+    return definition, true, nil
+}
+
+func readFile(fs fs.FS, filepath string) ([]byte, error) {
+    file, err := fs.Open(filepath)
+    if err != nil {
+        return nil, err
+    }
+    defer file.Close()
+
+    contents, err := io.ReadAll(file)
+    if err != nil {
+        return nil, err
+    }
+
+    return contents, nil
+}
diff --git a/migration_reader_test.go b/migration_reader_test.go
new file mode 100644
index 0000000..d56e6de
--- /dev/null
+++ b/migration_reader_test.go
@@ -0,0 +1,77 @@
+package pgutil
+
+import (
+    "path"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+var testUpQuery = `-- Create the comments table
+CREATE TABLE comments (
+    id SERIAL PRIMARY KEY,
+    post_id INTEGER NOT NULL REFERENCES posts(id) ON DELETE CASCADE,
+    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    content TEXT NOT NULL,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
+);
+`
+
+var testDownQuery = `-- Drop the comments table
+DROP TABLE IF EXISTS comments;
+`
+
+var testConcurrentIndexUpQuery = `-- Create a concurrent index
+CREATE INDEX CONCURRENTLY idx_users_email ON users (email);`
+
+var testConcurrentIndexDownQuery = `-- Drop the concurrent index
+DROP INDEX CONCURRENTLY IF EXISTS idx_users_email;`
+
+func TestReadMigrations(t *testing.T) {
+    t.Run("valid", func(t *testing.T) {
+        definitions, err := ReadMigrations(NewFilesystemMigrationReader(path.Join("testdata", "migrations", "valid")))
+        require.NoError(t, err)
+        require.Len(t, definitions, 3)
+
+        assert.Equal(t, Definition{
+            ID:        3,
+            Name:      "third",
+            UpQuery:   RawQuery(testUpQuery),
+            DownQuery: RawQuery(testDownQuery),
+        }, definitions[2])
+    })
+
+    t.Run("CIC pattern", func(t *testing.T) {
+        t.Skip()
+        definitions, err := ReadMigrations(NewFilesystemMigrationReader(path.Join("testdata", "migrations", "cic_pattern")))
+        require.NoError(t, err)
+        require.Len(t, definitions, 4)
+
+        assert.Equal(t, Definition{
+            ID:        3,
+            Name:      "third",
+            UpQuery:   RawQuery(testConcurrentIndexUpQuery),
+            DownQuery: RawQuery(testConcurrentIndexDownQuery),
+            IndexMetadata: &IndexMetadata{
+                TableName: "users",
+                IndexName: "idx_users_email",
+            },
+        }, definitions[3])
+    })
+
+    t.Run("duplicate identifiers", func(t *testing.T) {
+        _, err := ReadMigrations(NewFilesystemMigrationReader(path.Join("testdata", "migrations", "duplicate_identifiers")))
+        assert.ErrorContains(t, err, "duplicate migration identifier 2")
+    })
+
+    t.Run("CIC with additional queries", func(t *testing.T) {
+        _, err := ReadMigrations(NewFilesystemMigrationReader(path.Join("testdata", "migrations", "cic_with_additional_queries")))
+        assert.ErrorContains(t, err, `"create index concurrently" is not the only statement in the up migration`)
+    })
+
+    t.Run("CIC in down migration", func(t *testing.T) {
+        _, err := ReadMigrations(NewFilesystemMigrationReader(path.Join("testdata", "migrations", "cic_in_down_migration")))
+        assert.ErrorContains(t, err, `"create index concurrently" is not allowed in down migrations`)
+    })
+}
diff --git a/migration_runner.go b/migration_runner.go
new file mode 100644
index 0000000..5617994
--- /dev/null
+++ b/migration_runner.go
@@ -0,0 +1,559 @@
+package pgutil
+
+import (
+    "context"
+    "errors"
+    "slices"
+    "time"
+
+    "github.com/go-nacelle/log/v2"
+    "github.com/go-nacelle/nacelle/v2"
+    "github.com/jackc/pgconn"
+)
+
+type Runner struct {
+    db          DB
+    logger      nacelle.Logger
+    definitions []Definition
+    locker      *TransactionalLocker
+}
+
+func NewMigrationRunner(db DB, reader MigrationReader, logger nacelle.Logger) (*Runner, error) {
+    definitions, err := ReadMigrations(reader)
+    if err != nil {
+        return nil, err
+    }
+
+    locker, err := NewTransactionalLocker(db, StringKey("nacelle/pgutil.migration-runner"))
+    if err != nil {
+        return nil, err
+    }
+
+    return &Runner{
+        db:          db,
+        logger:      logger,
+        definitions: definitions,
+        locker:      locker,
+    }, nil
+}
+
+func (r *Runner) Definitions() []Definition {
+    return r.definitions
+}
+
+func (r *Runner) ApplyAll(ctx context.Context) error {
+    return r.apply(ctx, r.definitions)
+}
+
+func (r *Runner) Apply(ctx context.Context, id int) error {
+    for i, definition := range r.definitions {
+        if definition.ID == id {
+            return r.apply(ctx, r.definitions[:i+1])
+        }
+    }
+
+    return errors.New("migration not found")
+}
+
+func (r *Runner) apply(ctx context.Context, definitions []Definition) error {
+    if err := r.ensureMigrationLogsTable(ctx); err != nil {
+        return err
+    }
+
+    for {
+        upToDate, cicDefinition, err := r.applyDefinitions(ctx, definitions, false)
+        if err != nil || upToDate {
+            return err
+        }
+
+        if cicDefinition != nil {
+            if err := r.applyConcurrentIndexCreation(ctx, *cicDefinition); err != nil {
+                return err
+            }
+        }
+    }
+}
+
+func (r *Runner) Undo(ctx context.Context, id int) error {
+    if err := r.ensureMigrationLogsTable(ctx); err != nil {
+        return err
+    }
+
+    for i, definition := range r.definitions {
+        if definition.ID == id {
+            definitions := slices.Clone(r.definitions[i:])
+            slices.Reverse(definitions)
+
+            // NOTE: CIC are illegal in down migrations; perform this in one shot
+            _, _, err := r.applyDefinitions(ctx, definitions, true)
+            return err
+        }
+    }
+
+    return errors.New("migration not found")
+}
+
+func (r *Runner) ensureMigrationLogsTable(ctx context.Context) error {
+    for _, query := range []string{
+        "CREATE TABLE IF NOT EXISTS migration_logs(id SERIAL PRIMARY KEY)",
+        "ALTER TABLE migration_logs ADD COLUMN IF NOT EXISTS migration_id integer NOT NULL",
+        "ALTER TABLE migration_logs ADD COLUMN IF NOT EXISTS reverse bool NOT NULL",
+        "ALTER TABLE migration_logs ADD COLUMN IF NOT EXISTS started_at timestamptz NOT NULL DEFAULT current_timestamp",
+        "ALTER TABLE migration_logs ADD COLUMN IF NOT EXISTS last_heartbeat_at timestamptz",
+        "ALTER TABLE migration_logs ADD COLUMN IF NOT EXISTS finished_at timestamptz",
+        "ALTER TABLE migration_logs ADD COLUMN IF NOT EXISTS success boolean",
+        "ALTER TABLE migration_logs ADD COLUMN IF NOT EXISTS error_message text",
+    } {
+        if err := r.db.Exec(ctx, RawQuery(query)); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func (r *Runner) applyDefinitions(ctx context.Context, definitions []Definition, reverse bool) (upToDate bool, cicDefinition *Definition, _ error) {
+    err := r.locker.WithLock(ctx, StringKey("ddl"), func(_ DB) (err error) {
+        migrationLogs, err := r.MigrationLogs(ctx)
+        if err != nil {
+            return err
+        }
+
+        applied := map[int]struct{}{}
+        for _, log := range migrationLogs {
+            if log.Success != nil && *log.Success && !log.Reverse {
+                applied[log.MigrationID] = struct{}{}
+            }
+        }
+
+        var migrationsToApply []Definition
+        for _, definition := range definitions {
+            if _, ok := applied[definition.ID]; ok == reverse {
+                migrationsToApply = append(migrationsToApply, definition)
+            }
+        }
+
+        if len(migrationsToApply) == 0 {
+            r.logger.Info("Migrations are in expected state")
+            upToDate = true
+            return nil
+        }
+
+        for _, definition := range migrationsToApply {
+            if definition.IndexMetadata != nil && !reverse {
+                // We can't perform CIC while holding a lock or else we'll deadlock.
+                // Capture this definition to be applied outside of the lock we're holding.
+                // We can skip this check for reverse application as CIC are illegal in down migrations.
+                cicDefinition = &definition
+                return nil
+            }
+
+            if err := r.withMigrationLog(ctx, definition, reverse, func(_ int) error {
+                query, direction := definition.UpQuery, "up"
+                if reverse {
+                    query, direction = definition.DownQuery, "down"
+                }
+
+                logger := r.logger.WithFields(log.LogFields{
+                    "id":        definition.ID,
+                    "name":      definition.Name,
+                    "direction": direction,
+                })
+                logger.Info("Applying migration")
+
+                if err := r.db.WithTransaction(ctx, func(tx DB) error { return tx.Exec(ctx, query) }); err != nil {
+                    logger.ErrorWithFields(log.LogFields{"error": err}, "Failed to apply migration")
+                    return err
+                }
+
+                return nil
+            }); err != nil {
+                return err
+            }
+        }
+
+        return nil
+    })
+
+    return upToDate, cicDefinition, err
+}
+
+func (r *Runner) applyConcurrentIndexCreation(ctx context.Context, definition Definition) error {
+    tableName := definition.IndexMetadata.TableName
+    indexName := definition.IndexMetadata.IndexName
+
+    logger := r.logger.WithFields(log.LogFields{
+        "id":        definition.ID,
+        "name":      definition.Name,
+        "direction": "up",
+        "tableName": tableName,
+        "indexName": indexName,
+    })
+    logger.Info("Handling concurrent index creation")
+
+indexPollLoop:
+    for i := 0; ; i++ {
+        if i != 0 {
+            if err := wait(ctx, time.Second*5); err != nil {
+                return err
+            }
+        }
+
+        indexStatus, exists, err := r.getIndexStatus(ctx, tableName, indexName)
+        if err != nil {
+            return err
+        }
+
+        if exists {
+            logger.InfoWithFields(log.LogFields{
+                "phase":        deref(indexStatus.Phase),
+                "lockersTotal": deref(indexStatus.LockersTotal),
+                "lockersDone":  deref(indexStatus.LockersDone),
+                "blocksTotal":  deref(indexStatus.BlocksTotal),
+                "blocksDone":   deref(indexStatus.BlocksDone),
+                "tuplesTotal":  deref(indexStatus.TuplesTotal),
+                "tuplesDone":   deref(indexStatus.TuplesDone),
+            }, "Index exists")
+
+            if indexStatus.IsValid {
+                logger.Info("Index is valid")
+
+                if recheck, err := r.handleValidIndex(ctx, definition); err != nil {
+                    return err
+                } else if recheck {
+                    continue indexPollLoop
+                } else {
+                    return nil
+                }
+            }
+
+            if indexStatus.Phase != nil {
+                continue indexPollLoop
+            }
+
+            logger.Info("Dropping invalid index")
+
+            // NOTE: Must interpolate identifier here as placeholders aren't valid in this position.
+            if err := r.db.Exec(ctx, queryf(`DROP INDEX IF EXISTS %s`, indexName)); err != nil {
+                return err
+            }
+        }
+
+        logger.Info("Creating index")
+
+        if raceDetected, err := r.createIndexConcurrently(ctx, definition); err != nil {
+            return err
+        } else if raceDetected {
+            continue indexPollLoop
+        }
+
+        return nil
+    }
+}
+
+func (r *Runner) handleValidIndex(ctx context.Context, definition Definition) (recheck bool, _ error) {
+    err := r.locker.WithLock(ctx, StringKey("log"), func(tx DB) error {
+        log, ok, err := r.getLogForConcurrentIndex(ctx, tx, definition.ID)
+        if err != nil {
+            return err
+        }
+        if !ok {
+            if err := tx.Exec(ctx, Query(`
+                INSERT INTO migration_logs (migration_id, reverse, finished_at, success)
+                VALUES ({:id}, false, current_timestamp, true)
+            `,
+                Args{"id": definition.ID},
+            )); err != nil {
+                return err
+            }
+
+            return nil
+        }
+
+        if log.Success != nil {
+            if *log.Success {
+                return nil
+            }
+
+            return errors.New(*log.ErrorMessage)
+        }
+
+        if time.Since(log.LastHeartbeatAt) < time.Second*15 {
+            recheck = true
+            return nil
+        }
+
+        if err := tx.Exec(ctx, Query(`
+            UPDATE migration_logs
+            SET success = true, finished_at = current_timestamp
+            WHERE id = {:id}
+        `,
+            Args{"id": log.ID},
+        )); err != nil {
+            return err
+        }
+
+        return nil
+    })
+
+    return recheck, err
+}
+
+func (r *Runner) createIndexConcurrently(ctx context.Context, definition Definition) (raceDetected bool, _ error) {
+    err := r.withMigrationLog(ctx, definition, false, func(id int) error {
+        ctx, cancel := context.WithCancel(ctx)
+        defer cancel()
+
+        go func() {
+            for {
+                if err := r.db.Exec(ctx, Query(`
+                    UPDATE migration_logs
+                    SET last_heartbeat_at = current_timestamp
+                    WHERE id = {:id}
+                `,
+                    Args{"id": id},
+                )); err != nil && ctx.Err() != context.Canceled {
+                    r.logger.ErrorWithFields(log.LogFields{
+                        "error": err,
+                    }, "Failed to update heartbeat")
+                }
+
+                if err := wait(ctx, time.Second*5); err != nil {
+                    break
+                }
+            }
+        }()
+
+        if err := r.db.Exec(ctx, definition.UpQuery); err != nil {
+            var pgErr *pgconn.PgError
+            if !errors.As(err, &pgErr) || pgErr.Code != "42P07" {
+                return err
+            }
+
+            if err := r.db.Exec(ctx, Query(
+                `DELETE FROM migration_logs WHERE id = {:id}`,
+                Args{"id": id},
+            )); err != nil {
+                return err
+            }
+
+            raceDetected = true
+        }
+
+        return nil
+    })
+
+    return raceDetected, err
+}
+
+//
+//
+
+type MigrationLog struct {
+    MigrationID  int
+    Reverse      bool
+    Success      *bool
+    ErrorMessage *string
+}
+
+var scanMigrationLogs = NewSliceScanner(func(s Scanner) (ms MigrationLog, _ error) {
+    err := s.Scan(&ms.MigrationID, &ms.Reverse, &ms.Success, &ms.ErrorMessage)
+    return ms, err
+})
+
+func (r *Runner) MigrationLogs(ctx context.Context) (map[int]MigrationLog, error) {
+    if err := r.ensureMigrationLogsTable(ctx); err != nil {
+        return nil, err
+    }
+
+    migrationLogs, err := scanMigrationLogs(r.db.Query(ctx, RawQuery(`
+        WITH ranked_migration_logs AS (
+            SELECT
+                l.*,
+                ROW_NUMBER() OVER (PARTITION BY migration_id ORDER BY started_at DESC) AS rank
+            FROM migration_logs l
+        )
+        SELECT
+            migration_id,
+            reverse,
+            success,
+            error_message
+        FROM ranked_migration_logs
+        WHERE rank = 1
+        ORDER BY migration_id
+    `)))
+    if err != nil {
+        return nil, err
+    }
+
+    logMap := map[int]MigrationLog{}
+    for _, state := range migrationLogs {
+        logMap[state.MigrationID] = state
+    }
+
+    return logMap, nil
+}
+
+func (r *Runner) WriteMigrationLog(ctx context.Context, id int) error {
+    if err := r.ensureMigrationLogsTable(ctx); err != nil {
+        return err
+    }
+
+    var definition *Definition
+    for _, def := range r.definitions {
+        if def.ID == id {
+            definition = &def
+            break
+        }
+    }
+
+    if definition == nil {
+        return errors.New("migration not found")
+    }
+
+    return r.withMigrationLog(ctx, *definition, false, func(_ int) error {
+        r.logger.InfoWithFields(log.LogFields{"id": id}, "Force-writing migration log")
+        return nil
+    })
+}
+
+func (r *Runner) withMigrationLog(ctx context.Context, definition Definition, reverse bool, f func(id int) error) (err error) {
+    id, _, err := ScanInt(r.db.Query(ctx, Query(`
+        INSERT INTO migration_logs (migration_id, reverse)
+        VALUES ({:id}, {:reverse})
+        RETURNING id
+    `, Args{
+        "id":      definition.ID,
+        "reverse": reverse,
+    })))
+    if err != nil {
+        return err
+    }
+
+    defer func() {
+        err = errors.Join(err, r.db.Exec(ctx, Query(`
+            UPDATE migration_logs
+            SET
+                finished_at = current_timestamp,
+                success = {:success},
+                error_message = {:error_message}
+            WHERE id = {:id}
+        `, Args{
+            "success":       err == nil,
+            "error_message": extractErrorMessage(err),
+            "id":            id,
+        })))
+    }()
+
+    return f(id)
+}
+
+//
+//
+
+type indexStatus struct {
+    IsValid      bool
+    Phase        *string
+    LockersTotal *int
+    LockersDone  *int
+    BlocksTotal  *int
+    BlocksDone   *int
+    TuplesTotal  *int
+    TuplesDone   *int
+}
+
+var scanIndexStatus = NewFirstScanner(func(s Scanner) (is indexStatus, _ error) {
+    err := s.Scan(
+        &is.IsValid,
+        &is.Phase,
+        &is.LockersTotal,
+        &is.LockersDone,
+        &is.BlocksTotal,
+        &is.BlocksDone,
+        &is.TuplesTotal,
+        &is.TuplesDone,
+    )
+    return is, err
+})
+
+func (r *Runner) getIndexStatus(ctx context.Context, tableName, indexName string) (indexStatus, bool, error) {
+    return scanIndexStatus(r.db.Query(ctx, Query(`
+        SELECT
+            index.indisvalid,
+            progress.phase,
+            progress.lockers_total,
+            progress.lockers_done,
+            progress.blocks_total,
+            progress.blocks_done,
+            progress.tuples_total,
+            progress.tuples_done
+        FROM pg_catalog.pg_class table_class
+        JOIN pg_catalog.pg_index index ON index.indrelid = table_class.oid
+        JOIN pg_catalog.pg_class index_class ON index_class.oid = index.indexrelid
+        LEFT JOIN pg_catalog.pg_stat_progress_create_index progress ON progress.relid = table_class.oid AND progress.index_relid = index_class.oid
+        WHERE
+            table_class.relname = {:tableName} AND
+            index_class.relname = {:indexName}
+    `, Args{
+        "tableName": tableName,
+        "indexName": indexName,
+    })))
+}
+
+//
+//
+
+type concurrentIndexLog struct {
+    ID              int
+    Success         *bool
+    ErrorMessage    *string
+    LastHeartbeatAt time.Time
+}
+
+var scanConcurrentIndexLog = NewFirstScanner(func(s Scanner) (l concurrentIndexLog, _ error) {
+    err := s.Scan(&l.ID, &l.Success, &l.ErrorMessage, &l.LastHeartbeatAt)
+    return l, err
+})
+
+func (r *Runner) getLogForConcurrentIndex(ctx context.Context, db DB, id int) (concurrentIndexLog, bool, error) {
+    return scanConcurrentIndexLog(db.Query(ctx, Query(`
+        WITH ranked_migration_logs AS (
+            SELECT
+                l.*,
+                ROW_NUMBER() OVER (ORDER BY started_at DESC) AS rank
+            FROM migration_logs l
+            WHERE migration_id = {:id}
+        )
+        SELECT
+            id,
+            success,
+            error_message,
+            COALESCE(last_heartbeat_at, started_at)
+        FROM ranked_migration_logs
+        WHERE rank = 1 AND NOT reverse
+    `,
+        Args{"id": id},
+    )))
+}
+
+//
+//
+
+func wait(ctx context.Context, duration time.Duration) error {
+    select {
+    case <-time.After(duration):
+        return nil
+
+    case <-ctx.Done():
+        return ctx.Err()
+    }
+}
+
+func extractErrorMessage(err error) *string {
+    if err == nil {
+        return nil
+    }
+
+    msg := err.Error()
+    return &msg
+}
diff --git a/migration_runner_test.go b/migration_runner_test.go
new file mode 100644
index 0000000..e8cd81e --- /dev/null +++ b/migration_runner_test.go @@ -0,0 +1,314 @@ +package pgutil + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/go-nacelle/log/v2" + "github.com/stretchr/testify/require" +) + +func TestApply(t *testing.T) { + definitions := []RawDefinition{ + {ID: 1, RawUpQuery: "CREATE TABLE users (id SERIAL PRIMARY KEY, email TEXT);"}, + {ID: 2, RawUpQuery: "INSERT INTO users (email) VALUES ('test@gmail.com');"}, + {ID: 3, RawUpQuery: "ALTER TABLE users ADD COLUMN name TEXT;"}, + {ID: 4, RawUpQuery: "UPDATE users SET name = 'test';"}, + {ID: 5, RawUpQuery: "CREATE UNIQUE INDEX users_email_idx ON users (email);"}, + } + definitionsWithoutUpdates := []RawDefinition{definitions[0], definitions[1], definitions[2], definitions[4]} + reader := MigrationReaderFunc(func() ([]RawDefinition, error) { return definitions, nil }) + readerWithoutUpdates := MigrationReaderFunc(func() ([]RawDefinition, error) { return definitionsWithoutUpdates, nil }) + + t.Run("all", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Apply all migrations from scratch + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.ApplyAll(ctx)) + + // Assert last migration (unique index) was applied + err = db.Exec(ctx, Query( + "INSERT INTO users (name, email) VALUES ({:name}, {:email})", + Args{"name": "duplicate", "email": "test@gmail.com"}, + )) + require.ErrorContains(t, err, "duplicate key value violates unique constraint") + }) + + t.Run("tail", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Head first + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.Apply(ctx, 2)) + + // Assert no name column yet + _, _, err = ScanString(db.Query(ctx, RawQuery("SELECT name FROM users WHERE email = 'test@gmail.com'"))) + require.ErrorContains(t, err, "column \"name\" does not exist") + + // Apply the tail + require.NoError(t, runner.Apply(ctx, 5)) + + // Assert name column added and populated + name, _, err := ScanString(db.Query(ctx, RawQuery("SELECT name FROM users WHERE email = 'test@gmail.com'"))) + require.NoError(t, err) + require.Equal(t, "test", name) + }) + + t.Run("gaps", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Apply all migrations except #4 + runnerWithHoles, err := NewMigrationRunner(db, readerWithoutUpdates, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runnerWithHoles.ApplyAll(ctx)) + + // Assert name column exists but is not yet populated + namePtr, _, err := ScanNilString(db.Query(ctx, RawQuery("SELECT name FROM users WHERE email = 'test@gmail.com'"))) + require.NoError(t, err) + require.Nil(t, namePtr) + + // Apply all missing migrations + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.ApplyAll(ctx)) + + // Assert name column now populated + name, _, err := ScanString(db.Query(ctx, RawQuery("SELECT name FROM users WHERE email = 'test@gmail.com'"))) + require.NoError(t, err) + require.Equal(t, "test", name) + }) +} + +func TestApplyCreateConcurrentIndex(t *testing.T) { + definitions := []RawDefinition{ + {ID: 1, RawUpQuery: "CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL, email TEXT NOT NULL);"}, + {ID: 2, RawUpQuery: "INSERT INTO users (name, email) VALUES ('test1', 'test1@gmail.com');"}, + {ID: 3, RawUpQuery: "CREATE UNIQUE INDEX CONCURRENTLY users_email_idx ON users (email);"}, + {ID: 4, RawUpQuery: "INSERT INTO users (name, email) VALUES ('test2', 'test2@gmail.com');"}, + } + reader := MigrationReaderFunc(func() ([]RawDefinition, error) { return definitions, nil }) + + t.Run("CIC", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Apply all migrations from scratch + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.ApplyAll(ctx)) + + // Assert last migration (unique index) was applied + err = db.Exec(ctx, Query( + "INSERT INTO users (name, email) VALUES ({:name}, {:email})", + Args{"name": "duplicate", "email": "test2@gmail.com"}, + )) + require.ErrorContains(t, err, "duplicate key value violates unique constraint") + }) + + t.Run("CIC (already created)", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Apply the first two migrations + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.Apply(ctx, 2)) + + // Create the index outside of the migration infrastructure + require.NoError(t, db.Exec(ctx, RawQuery(definitions[2].RawUpQuery))) + + // Apply remaining migrations + require.NoError(t, runner.ApplyAll(ctx)) + + // Assert last migration (unique index) was applied + err = db.Exec(ctx, Query( + "INSERT INTO users (name, email) VALUES ({:name}, {:email})", + Args{"name": "duplicate", "email": "test2@gmail.com"}, + )) + require.ErrorContains(t, err, "duplicate key value violates unique constraint") + }) + + t.Run("CIC (invalid)", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Apply the first two migrations + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.Apply(ctx, 2)) + + // Create the index outside of the migration infrastructure and force it to be invalid + require.NoError(t, db.Exec(ctx, RawQuery(definitions[2].RawUpQuery))) + require.NoError(t, db.Exec(ctx, RawQuery(` + UPDATE pg_index + SET indisvalid = false + WHERE indexrelid = ( + SELECT oid + FROM pg_class + WHERE relname = 'users_email_idx' + ); + `))) + + // Apply remaining migrations + require.NoError(t, runner.ApplyAll(ctx)) + + // Assert last migration (unique index) was applied + err = db.Exec(ctx, Query( + "INSERT INTO users (name, email) VALUES ({:name}, {:email})", + Args{"name": "duplicate", "email": "test2@gmail.com"}, + )) + require.ErrorContains(t, err, "duplicate key value violates unique constraint") + }) + + t.Run("CIC (in progress)", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Apply the first two migrations + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.Apply(ctx, 2)) + + var wg sync.WaitGroup + errCh := make(chan error, 2) + + async := func(f func() error) { + wg.Add(1) + + go func() { + defer wg.Done() + + if err := f(); err != nil { + errCh <- err + } + }() + } + + // Start a transaction and insert a row in the users table but don't + // commit so that we hold open a lock for the following async tasks. + tx, err := db.Transact(ctx) + require.NoError(t, err) + require.NoError(t, tx.Exec(ctx, RawQuery("INSERT INTO users (name, email) VALUES ('blocking', 'blocking@example.com')"))) + + // Begin creating the index concurrently outside the migration runner + // This will block until the transaction above commits or rolls back + async(func() error { return db.Exec(ctx, RawQuery(definitions[2].RawUpQuery)) }) + + // Allow time to ensure index creation has started (and is blocked) + <-time.After(1 * time.Second) + + // Apply the index creation and remaining migrations + // This will block until the other index creation completes + async(func() error { return runner.ApplyAll(ctx) }) + + // Allow time to ensure the ApplyAll has started (and is blocked) + <-time.After(2 * time.Second) + + // Commit the transaction and allow the index creation to complete + require.NoError(t, tx.Done(nil)) + + // Sync on async tasks and check for errors + wg.Wait() + close(errCh) + for err := range errCh { + require.NoError(t, err) + } + + // Assert that the migration runner has unblocked and the index exists + err = db.Exec(ctx, Query( + "INSERT INTO users (name, email) VALUES ({:name}, {:email})", + Args{"name": "duplicate", "email": "test2@gmail.com"}, + )) + require.ErrorContains(t, err, "duplicate key value violates unique constraint") + }) +} + +func TestUndo(t *testing.T) { + definitions := []RawDefinition{ + { + ID: 1, + RawUpQuery: "CREATE TABLE users (id SERIAL PRIMARY KEY, email TEXT);", + RawDownQuery: "DROP TABLE users;", + }, + { + ID: 2, + RawUpQuery: "CREATE TABLE comments (id SERIAL PRIMARY KEY, content TEXT NOT NULL, user_id INTEGER NOT NULL);", + RawDownQuery: "DROP TABLE comments;", + }, + { + ID: 3, + RawUpQuery: "ALTER TABLE comments ADD COLUMN updated_at TIMESTAMP WITH TIME ZONE;", + RawDownQuery: "ALTER TABLE comments DROP COLUMN updated_at;", + }, + { + ID: 4, + RawUpQuery: "ALTER TABLE comments ADD COLUMN created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW();", + RawDownQuery: "ALTER TABLE comments DROP COLUMN created_at;", + }, + + {ID: 5, RawUpQuery: "INSERT INTO users (email) VALUES ('test@gmail.com');"}, + {ID: 6, RawUpQuery: "INSERT INTO comments (content, user_id) VALUES ('test', 1);"}, + {ID: 7, RawUpQuery: "UPDATE comments SET updated_at = NOW();"}, + } + definitionsWithoutCreatedAt := []RawDefinition{definitions[0], definitions[1], definitions[2], definitions[4], definitions[5], definitions[6]} + reader := MigrationReaderFunc(func() ([]RawDefinition, error) { return definitions, nil }) + readerWithoutCreatedAt := MigrationReaderFunc(func() ([]RawDefinition, error) { return definitionsWithoutCreatedAt, nil }) + + t.Run("tail", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() + + // Apply all migrations + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.ApplyAll(ctx)) + + // Assert columns exist and are populated + createdAt, _, err := ScanNilTimestamp(db.Query(ctx, RawQuery("SELECT created_at FROM comments WHERE user_id = 1"))) + require.NoError(t, err) + require.NotNil(t, createdAt) + + // Undo migrations that added created_at/updated_at columns + require.NoError(t, runner.Undo(ctx, 3)) + + // Assert columns dropped + _, _, err = ScanString(db.Query(ctx, RawQuery("SELECT updated_at FROM comments WHERE user_id = 1"))) + require.ErrorContains(t, err, "column \"updated_at\" does not exist") + }) + + t.Run("gaps", func(t *testing.T) { + db := NewTestDB(t) + ctx := context.Background() +
+ // Apply all migrations + runner, err := NewMigrationRunner(db, reader, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runner.ApplyAll(ctx)) + + // Undo migrations but skip #4 + runnerWithHoles, err := NewMigrationRunner(db, readerWithoutCreatedAt, log.NewNilLogger()) + require.NoError(t, err) + require.NoError(t, runnerWithHoles.Undo(ctx, 3)) + + // Assert created_at exists but updated_at does not + _, _, err = ScanNilTimestamp(db.Query(ctx, RawQuery("SELECT created_at FROM comments WHERE user_id = 1"))) + require.NoError(t, err) + _, _, err = ScanString(db.Query(ctx, RawQuery("SELECT updated_at FROM comments WHERE user_id = 1"))) + require.ErrorContains(t, err, "column \"updated_at\" does not exist") + + // Undo migrations including #4 + require.NoError(t, runner.Undo(ctx, 3)) + + // Assert both columns dropped + _, _, err = ScanString(db.Query(ctx, RawQuery("SELECT created_at FROM comments WHERE user_id = 1"))) + require.ErrorContains(t, err, "column \"created_at\" does not exist") + }) +} diff --git a/options.go b/options.go deleted file mode 100644 index 2c0b348..0000000 --- a/options.go +++ /dev/null @@ -1,26 +0,0 @@ -package pgutil - -import "github.com/golang-migrate/migrate/v4/source" - -type ( - options struct { - sourceDriver source.Driver - } - - // ConfigFunc is a function used to configure an initializer. - ConfigFunc func(*options) -) - -// WithMigrationSourceDriver sets the migration source driver. -func WithMigrationSourceDriver(sourceDriver source.Driver) ConfigFunc { - return func(o *options) { o.sourceDriver = sourceDriver } -} - -func getOptions(configs []ConfigFunc) *options { - options := &options{} - for _, f := range configs { - f(options) - } - - return options -} diff --git a/paging.go b/paging.go deleted file mode 100644 index 47347d7..0000000 --- a/paging.go +++ /dev/null @@ -1,59 +0,0 @@ -package pgutil - -import ( - "fmt" - "math" - - "github.com/jmoiron/sqlx" -) - -type ( - PageMeta struct { - Page int - PageSize int - } - - PagedResultMeta struct { - NumPages int `json:"num_pages"` - NumResults int `json:"num_results"` - } -) - -func (m *PageMeta) Limit() int { - return m.PageSize -} - -func (m *PageMeta) Offset() int { - return m.PageSize * (m.Page - 1) -} - -func PagedSelect( - db *LoggingDB, - meta *PageMeta, - baseQuery string, - target interface{}, - args ...interface{}, -) (*PagedResultMeta, error) { - var ( - total int - countQuery = fmt.Sprintf("select count(*) from (%s) q", baseQuery) - ) - - if err := sqlx.Get(db, &total, countQuery, args...); err != nil { - return nil, HandleError(err, "select error") - } - - var ( - limitQuery = fmt.Sprintf("%s limit $%d offset $%d", baseQuery, len(args)+1, len(args)+2) - limitArgs = append(args, meta.Limit(), meta.Offset()) - ) - - if err := sqlx.Select(db, target, limitQuery, limitArgs...); err != nil { - return nil, HandleError(err, "select error") - } - - return &PagedResultMeta{ - NumResults: total, - NumPages: int(math.Ceil(float64(total) / float64(meta.Limit()))), - }, nil -} diff --git a/query.go b/query.go new file mode 100644 index 0000000..d136612 --- /dev/null +++ b/query.go @@ -0,0 +1,144 @@ +package pgutil + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type Q struct { + internalFormat string + replacerPairs []string + parameterizedArgs []any +} + +type Args map[string]any + +func Query(format string, args Args) Q { + var ( + internalFormat string + replacerPairs []string + parameterizedArgs []any + previousIndex = 0 + placeholdersToIndex = map[string]int{} + ) + 
+ for _, part := range tokenize(format) { + name, ok := extractPlaceholderName(part) + if !ok { + // literal, not a placeholder + internalFormat += part + continue + } + + value, ok := args[name] + if !ok { + panic(fmt.Sprintf("no arg supplied for %q", name)) + } + + if q, ok := value.(Q); ok { + // Serialize all internal placeholders transforming `{$X}` -> `{${X+lastIndex}}` + subInternalFormat, subReplacerPairs, subParameterizedArgs := q.bumpPlaceholderIndices(previousIndex) + + // Embed this query into the internal format + internalFormat += subInternalFormat + replacerPairs = append(replacerPairs, subReplacerPairs...) + parameterizedArgs = append(parameterizedArgs, subParameterizedArgs...) + + // Bump indexes by number of placeholders in serialized query + previousIndex += len(subReplacerPairs) / 2 + } else { + // Re-use parameter if possible; otherwise create a new placeholder + index, ok := placeholdersToIndex[name] + if !ok { + previousIndex++ + index = previousIndex + placeholdersToIndex[name] = index + parameterizedArgs = append(parameterizedArgs, value) + } + + placeholder := fmt.Sprintf("$%d", index) + internalPlaceholder := fmt.Sprintf("{%s}", placeholder) + + // Embed placeholder into the internal format + internalFormat += internalPlaceholder + replacerPairs = append(replacerPairs, internalPlaceholder, placeholder) + } + } + + return Q{ + internalFormat: internalFormat, + replacerPairs: replacerPairs, + parameterizedArgs: parameterizedArgs, + } +} + +func Quote(format string) Q { + return RawQuery(format) +} + +func RawQuery(format string, args ...any) Q { + return Q{internalFormat: format, parameterizedArgs: args} +} + +func queryf(format string, args ...any) Q { + return RawQuery(fmt.Sprintf(format, args...)) +} + +func (q Q) Format() (string, []any) { + return replaceWithPairs(q.internalFormat, q.replacerPairs...), q.parameterizedArgs +} + +func (q Q) bumpPlaceholderIndices(offset int) (string, []string, []any) { + var ( + rewriterPairs = make([]string, 0, len(q.replacerPairs)) + replacerPairs = make([]string, 0, len(q.replacerPairs)) + ) + + for i := 0; i < len(q.replacerPairs); i += 2 { + oldPlaceholder := q.replacerPairs[i] + oldPlaceholderValue, _ := strconv.Atoi(q.replacerPairs[i+1][1:]) + + newPlaceholder := fmt.Sprintf("$%d", oldPlaceholderValue+offset) + newInternalPlaceholder := fmt.Sprintf("{%s}", newPlaceholder) + + rewriterPairs = append(rewriterPairs, oldPlaceholder, newInternalPlaceholder) + replacerPairs = append(replacerPairs, newInternalPlaceholder, newPlaceholder) + } + + return replaceWithPairs(q.internalFormat, rewriterPairs...), replacerPairs, q.parameterizedArgs +} + +var placeholderPattern = regexp.MustCompile(`{:(\w+)}`) + +func tokenize(format string) []string { + var ( + matches = placeholderPattern.FindAllStringIndex(format, -1) + parts = make([]string, 0, len(matches)*2+1) + offset = 0 + ) + + for _, match := range matches { + parts = append(parts, format[offset:match[0]]) // capture from last match up to this placeholder + parts = append(parts, format[match[0]:match[1]]) // capture `{:placeholder}` + offset = match[1] + } + + // capture from last match to end of string + // NOTE: if there were no matches offset will be zero + return append(parts, format[offset:]) +} + +func extractPlaceholderName(part string) (string, bool) { + matches := placeholderPattern.FindStringSubmatch(part) + if len(matches) > 0 { + return matches[1], true + } + + return "", false +} + +func replaceWithPairs(format string, replacerPairs ...string) string { + return 
strings.NewReplacer(replacerPairs...).Replace(format) +} diff --git a/query_test.go b/query_test.go new file mode 100644 index 0000000..880bec5 --- /dev/null +++ b/query_test.go @@ -0,0 +1,142 @@ +package pgutil + +import ( + "testing" + + "github.com/lib/pq" + "github.com/stretchr/testify/assert" +) + +func TestQuery(t *testing.T) { + testQuery := func(t *testing.T, q Q, expectedQuery string, expectedArgs ...any) { + t.Helper() + + query, args := q.Format() + assert.Equal(t, expectedQuery, query) + assert.Equal(t, expectedArgs, args) + } + + t.Run("literal", func(t *testing.T) { + q := Query("SELECT random()", Args{ + // empty + }) + + testQuery(t, q, "SELECT random()") + }) + + t.Run("simple", func(t *testing.T) { + q := Query("SELECT * FROM users WHERE id = {:id}", Args{ + "id": 42, + }) + + testQuery(t, q, "SELECT * FROM users WHERE id = $1", 42) + }) + + t.Run("quoted", func(t *testing.T) { + q := Query("SELECT {:col} FROM users", Args{ + "col": Quote("username"), + }) + + testQuery(t, q, "SELECT username FROM users") + }) + + t.Run("variable reuse", func(t *testing.T) { + q := Query("SELECT * FROM users WHERE (id = {:id} AND NOT blocked) OR (id != {:id} AND admin)", Args{ + "id": 42, + }) + + testQuery(t, q, "SELECT * FROM users WHERE (id = $1 AND NOT blocked) OR (id != $1 AND admin)", 42) + }) + + t.Run("fragments", func(t *testing.T) { + cond := Query("WHERE name = {:name} AND age = {:age}", Args{ + "name": "efritz", + "age": 34, + }) + + limit := Query("LIMIT {:limit} OFFSET {:offset}", Args{ + "limit": 10, + "offset": 20, + }) + + q := Query("SELECT name FROM users {:cond} {:limit}", Args{ + "cond": cond, + "limit": limit, + }) + + testQuery(t, q, + "SELECT name FROM users WHERE name = $1 AND age = $2 LIMIT $3 OFFSET $4", + "efritz", 34, 10, 20, + ) + }) + + t.Run("nested subqueries", func(t *testing.T) { + preferredKeys := pq.Array([]string{"foo", "bar", "baz"}) + selectSubquery := Query("SELECT * FROM pairs WHERE s.key IN {:prefer}", Args{ + "prefer": preferredKeys, + }) + + avoidedKeys := pq.Array([]string{"bonk", "quux", "honk"}) + condSubquery := Query("SELECT s.value FROM pairs WHERE s.key IN {:avoid}", Args{ + "avoid": avoidedKeys, + }) + + q := Query("SELECT {:lit}, s.key, s.value FROM ({:selectSubquery}) s WHERE s.key != {:avoid} AND s.value NOT IN ({:condSubquery})", Args{ + "lit": "test", + "selectSubquery": selectSubquery, + "avoid": "__invalid", + "condSubquery": condSubquery, + }) + + testQuery(t, q, + "SELECT $1, s.key, s.value FROM (SELECT * FROM pairs WHERE s.key IN $2) s WHERE s.key != $3 AND s.value NOT IN (SELECT s.value FROM pairs WHERE s.key IN $4)", + "test", preferredKeys, "__invalid", avoidedKeys, + ) + }) + + t.Run("nested nested subqueries", func(t *testing.T) { + q1 := Query("SELECT {:value}", Args{"value": "foo"}) + q2 := Query("SELECT z FROM inner WHERE x = {:value} AND y = ({:q})", Args{"value": "bar", "q": q1}) + q3 := Query("SELECT w FROM outer WHERE x = {:value} AND y = ({:q})", Args{"value": "baz", "q": q2}) + + testQuery(t, q3, + "SELECT w FROM outer WHERE x = $1 AND y = (SELECT z FROM inner WHERE x = $2 AND y = (SELECT $3))", + "baz", "bar", "foo", + ) + }) + + t.Run("literal percent operator", func(t *testing.T) { + q := Query("SELECT * FROM index WHERE a <<% {:term} AND document_id = {:documentID}", Args{ + "term": "how to delete someone else's tweet", + "documentID": 42, + }) + + testQuery(t, q, "SELECT * FROM index WHERE a <<% $1 AND document_id = $2", "how to delete someone else's tweet", 42) + }) + + t.Run("literal arrays", func(t *testing.T) {
+ t.Run("empty", func(t *testing.T) { + q := Query("SELECT * FROM products WHERE tag IN '{}'", Args{ + // empty + }) + + testQuery(t, q, "SELECT * FROM products WHERE tag IN '{}'") + }) + + t.Run("singleton", func(t *testing.T) { + q := Query("SELECT * FROM products WHERE tag NOT IN '{uselessjunk}'", Args{ + // empty + }) + + testQuery(t, q, "SELECT * FROM products WHERE tag NOT IN '{uselessjunk}'") + }) + + t.Run("multi value", func(t *testing.T) { + q := Query("SELECT * FROM products WHERE tag IN '{sale,electronics}'", Args{ + // empty + }) + + testQuery(t, q, "SELECT * FROM products WHERE tag IN '{sale,electronics}'") + }) + }) +} diff --git a/rows.go b/rows.go new file mode 100644 index 0000000..7425db6 --- /dev/null +++ b/rows.go @@ -0,0 +1,13 @@ +package pgutil + +type Scanner interface { + Scan(dst ...any) error +} + +type Rows interface { + Scanner + + Next() bool + Close() error + Err() error +} diff --git a/rows_scanner.go b/rows_scanner.go new file mode 100644 index 0000000..bde4ad6 --- /dev/null +++ b/rows_scanner.go @@ -0,0 +1,37 @@ +package pgutil + +import "errors" + +type ScanFunc func(Scanner) error +type MaybeScanFunc func(Scanner) (bool, error) + +func newMaybeScanFunc(f ScanFunc) MaybeScanFunc { + return func(s Scanner) (bool, error) { + return true, f(s) + } +} + +type RowScannerFunc func(rows Rows, queryErr error) error + +func NewRowScanner(f ScanFunc) RowScannerFunc { + return NewMaybeRowScanner(newMaybeScanFunc(f)) +} + +func NewMaybeRowScanner(f MaybeScanFunc) RowScannerFunc { + return func(rows Rows, queryErr error) (err error) { + if queryErr != nil { + return queryErr + } + defer func() { err = errors.Join(err, rows.Close(), rows.Err()) }() + + for rows.Next() { + if ok, err := f(rows); err != nil { + return err + } else if !ok { + break + } + } + + return nil + } +} diff --git a/rows_slice_scanner.go b/rows_slice_scanner.go new file mode 100644 index 0000000..7dd3160 --- /dev/null +++ b/rows_slice_scanner.go @@ -0,0 +1,111 @@ +package pgutil + +import "time" + +type SliceScannerFunc[T any] func(rows Rows, queryErr error) ([]T, error) +type FirstScannerFunc[T any] func(rows Rows, queryErr error) (T, bool, error) + +func NewSliceScanner[T any](f ScanValueFunc[T]) SliceScannerFunc[T] { + return NewMaybeSliceScanner(newMaybeScanValueFunc(f)) +} + +func NewMaybeSliceScanner[T any](f MaybeScanValueFunc[T]) SliceScannerFunc[T] { + return func(rows Rows, queryErr error) ([]T, error) { + values := make([]T, 0) + scan := func(s Scanner) (bool, error) { + value, ok, err := f(s) + if err != nil { + return false, err + } + if ok { + values = append(values, value) + } + + return ok, nil + } + + err := NewMaybeRowScanner(scan)(rows, queryErr) + return values, err + } +} + +func NewFirstScanner[T any](f ScanValueFunc[T]) FirstScannerFunc[T] { + return NewMaybeFirstScanner[T](newMaybeScanValueFunc(f)) +} + +func NewMaybeFirstScanner[T any](f MaybeScanValueFunc[T]) FirstScannerFunc[T] { + return func(rows Rows, queryErr error) (value T, called bool, _ error) { + scan := func(s Scanner) (_ bool, err error) { + value, called, err = f(s) + return false, err + } + + err := NewMaybeRowScanner(scan)(rows, queryErr) + return value, called, err + } +} + +var ( + ScanAny = NewFirstScanner(NewAnyValueScanner[any]()) + ScanAnys = NewSliceScanner(NewAnyValueScanner[any]()) + ScanBool = NewFirstScanner(NewAnyValueScanner[bool]()) + ScanBools = NewSliceScanner(NewAnyValueScanner[bool]()) + ScanFloat32 = NewFirstScanner(NewAnyValueScanner[float32]()) + ScanFloat32s = 
NewSliceScanner(NewAnyValueScanner[float32]()) + ScanFloat64 = NewFirstScanner(NewAnyValueScanner[float64]()) + ScanFloat64s = NewSliceScanner(NewAnyValueScanner[float64]()) + ScanInt = NewFirstScanner(NewAnyValueScanner[int]()) + ScanInts = NewSliceScanner(NewAnyValueScanner[int]()) + ScanInt16 = NewFirstScanner(NewAnyValueScanner[int16]()) + ScanInt16s = NewSliceScanner(NewAnyValueScanner[int16]()) + ScanInt32 = NewFirstScanner(NewAnyValueScanner[int32]()) + ScanInt32s = NewSliceScanner(NewAnyValueScanner[int32]()) + ScanInt64 = NewFirstScanner(NewAnyValueScanner[int64]()) + ScanInt64s = NewSliceScanner(NewAnyValueScanner[int64]()) + ScanInt8 = NewFirstScanner(NewAnyValueScanner[int8]()) + ScanInt8s = NewSliceScanner(NewAnyValueScanner[int8]()) + ScanString = NewFirstScanner(NewAnyValueScanner[string]()) + ScanStrings = NewSliceScanner(NewAnyValueScanner[string]()) + ScanUint = NewFirstScanner(NewAnyValueScanner[uint]()) + ScanUints = NewSliceScanner(NewAnyValueScanner[uint]()) + ScanUint16 = NewFirstScanner(NewAnyValueScanner[uint16]()) + ScanUint16s = NewSliceScanner(NewAnyValueScanner[uint16]()) + ScanUint32 = NewFirstScanner(NewAnyValueScanner[uint32]()) + ScanUint32s = NewSliceScanner(NewAnyValueScanner[uint32]()) + ScanUint64 = NewFirstScanner(NewAnyValueScanner[uint64]()) + ScanUint64s = NewSliceScanner(NewAnyValueScanner[uint64]()) + ScanUint8 = NewFirstScanner(NewAnyValueScanner[uint8]()) + ScanUint8s = NewSliceScanner(NewAnyValueScanner[uint8]()) + ScanTimestamp = NewFirstScanner(NewAnyValueScanner[time.Time]()) + ScanTimestamps = NewSliceScanner(NewAnyValueScanner[time.Time]()) + ScanNilBool = NewFirstScanner(NewAnyValueScanner[*bool]()) + ScanNilBools = NewSliceScanner(NewAnyValueScanner[*bool]()) + ScanNilFloat32 = NewFirstScanner(NewAnyValueScanner[*float32]()) + ScanNilFloat32s = NewSliceScanner(NewAnyValueScanner[*float32]()) + ScanNilFloat64 = NewFirstScanner(NewAnyValueScanner[*float64]()) + ScanNilFloat64s = NewSliceScanner(NewAnyValueScanner[*float64]()) + ScanNilInt = NewFirstScanner(NewAnyValueScanner[*int]()) + ScanNilInts = NewSliceScanner(NewAnyValueScanner[*int]()) + ScanNilInt16 = NewFirstScanner(NewAnyValueScanner[*int16]()) + ScanNilInt16s = NewSliceScanner(NewAnyValueScanner[*int16]()) + ScanNilInt32 = NewFirstScanner(NewAnyValueScanner[*int32]()) + ScanNilInt32s = NewSliceScanner(NewAnyValueScanner[*int32]()) + ScanNilInt64 = NewFirstScanner(NewAnyValueScanner[*int64]()) + ScanNilInt64s = NewSliceScanner(NewAnyValueScanner[*int64]()) + ScanNilInt8 = NewFirstScanner(NewAnyValueScanner[*int8]()) + ScanNilInt8s = NewSliceScanner(NewAnyValueScanner[*int8]()) + ScanNilString = NewFirstScanner(NewAnyValueScanner[*string]()) + ScanNilStrings = NewSliceScanner(NewAnyValueScanner[*string]()) + ScanNilUint = NewFirstScanner(NewAnyValueScanner[*uint]()) + ScanNilUints = NewSliceScanner(NewAnyValueScanner[*uint]()) + ScanNilUint16 = NewFirstScanner(NewAnyValueScanner[*uint16]()) + ScanNilUint16s = NewSliceScanner(NewAnyValueScanner[*uint16]()) + ScanNilUint32 = NewFirstScanner(NewAnyValueScanner[*uint32]()) + ScanNilUint32s = NewSliceScanner(NewAnyValueScanner[*uint32]()) + ScanNilUint64 = NewFirstScanner(NewAnyValueScanner[*uint64]()) + ScanNilUint64s = NewSliceScanner(NewAnyValueScanner[*uint64]()) + ScanNilUint8 = NewFirstScanner(NewAnyValueScanner[*uint8]()) + ScanNilUint8s = NewSliceScanner(NewAnyValueScanner[*uint8]()) + ScanNilTimestamp = NewFirstScanner(NewAnyValueScanner[*time.Time]()) + ScanNilTimestamps = NewSliceScanner(NewAnyValueScanner[*time.Time]()) +) 
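A minimal usage sketch of the scanner combinators above, for review context (not part of the patch): the `userRow` type, the `emailsForDomain` function, and the `users(id, email)` table are hypothetical, while `NewSliceScanner`, `Scanner`, `DB`, `Query`, and `Args` are the identifiers introduced by this change.

type userRow struct {
	ID    int
	Email string
}

// A reusable scanner that maps each result row onto a userRow value.
var scanUserRows = NewSliceScanner(func(s Scanner) (u userRow, _ error) {
	err := s.Scan(&u.ID, &u.Email)
	return u, err
})

// emailsForDomain returns every user whose email ends with the given domain.
// The {:pattern} placeholder is rewritten to the positional $1 parameter.
func emailsForDomain(ctx context.Context, db DB, domain string) ([]userRow, error) {
	return scanUserRows(db.Query(ctx, Query(
		`SELECT id, email FROM users WHERE email LIKE {:pattern}`,
		Args{"pattern": "%@" + domain},
	)))
}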
diff --git a/rows_slice_scanner_test.go b/rows_slice_scanner_test.go new file mode 100644 index 0000000..fdb532b --- /dev/null +++ b/rows_slice_scanner_test.go @@ -0,0 +1,137 @@ +package pgutil + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSliceScanner(t *testing.T) { + t.Run("scalar values", func(t *testing.T) { + values, err := ScanInts(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1), (2), (3)) AS t(number)`), + )) + require.NoError(t, err) + assert.Equal(t, []int{1, 2, 3}, values) + }) + + t.Run("custom struct values", func(t *testing.T) { + values, err := scanTestPairs(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1,2), (2,3), (3,4)) AS t(x,y)`), + )) + require.NoError(t, err) + assert.Equal(t, []testPair{{1, 2}, {2, 3}, {3, 4}}, values) + }) + + t.Run("no values", func(t *testing.T) { + values, err := ScanInts(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1), (2), (3)) AS t(number) LIMIT 0`), + )) + require.NoError(t, err) + assert.Empty(t, values) + }) +} + +func TestMaybeSliceScanner(t *testing.T) { + values, err := scanMaybeTestPairs(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1,2), (2,3), (0,0), (3,4)) AS t(x,y)`), + )) + require.NoError(t, err) + assert.Equal(t, []testPair{{1, 2}, {2, 3}}, values) +} + +func TestFirstScanner(t *testing.T) { + t.Run("scalar value", func(t *testing.T) { + value, ok, err := ScanInt(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1)) AS t(number)`), + )) + require.NoError(t, err) + assert.True(t, ok) + assert.Equal(t, 1, value) + }) + + t.Run("scalar value (ignores non-first values)", func(t *testing.T) { + value, ok, err := ScanInt(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1), (2), (3)) AS t(number)`), + )) + require.NoError(t, err) + assert.True(t, ok) + assert.Equal(t, 1, value) + }) + + t.Run("custom struct value", func(t *testing.T) { + value, ok, err := scanFirstTestPair(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1,2)) AS t(x,y)`), + )) + require.NoError(t, err) + assert.True(t, ok) + assert.Equal(t, testPair{1, 2}, value) + }) + + t.Run("no value", func(t *testing.T) { + _, ok, err := ScanInt(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1), (2), (3)) AS t(number) LIMIT 0`), + )) + require.NoError(t, err) + assert.False(t, ok) + }) +} + +func TestMaybeFirstScanner(t *testing.T) { + t.Run("custom struct value", func(t *testing.T) { + value, ok, err := scanMaybeFirstTestPair(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (1,2)) AS t(x,y)`), + )) + require.NoError(t, err) + assert.True(t, ok) + assert.Equal(t, testPair{1, 2}, value) + }) + + t.Run("rejected value", func(t *testing.T) { + type testPair struct { + x int + y int + } + scanner := NewMaybeFirstScanner(func(s Scanner) (p testPair, _ bool, _ error) { + err := s.Scan(&p.x, &p.y) + return p, p.x != 0 && p.y != 0, err + }) + + _, ok, err := scanner(NewTestDB(t).Query(context.Background(), + RawQuery(`SELECT * FROM (VALUES (0,0), (1,2)) AS t(x,y)`), + )) + require.NoError(t, err) + assert.False(t, ok) + }) +} + +// +// +// + +type testPair struct { + x int + y int +} + +var scanTestPairs = NewSliceScanner(func(s Scanner) (p testPair, _ error) { + err := s.Scan(&p.x, &p.y) + return p, err +}) + +var scanMaybeTestPairs = 
NewMaybeSliceScanner(func(s Scanner) (p testPair, _ bool, _ error) { + err := s.Scan(&p.x, &p.y) + return p, p.x != 0 && p.y != 0, err +}) + +var scanFirstTestPair = NewFirstScanner(func(s Scanner) (p testPair, _ error) { + err := s.Scan(&p.x, &p.y) + return p, err +}) + +var scanMaybeFirstTestPair = NewMaybeFirstScanner(func(s Scanner) (p testPair, _ bool, _ error) { + err := s.Scan(&p.x, &p.y) + return p, p.x != 0 && p.y != 0, err +}) diff --git a/rows_value_scanner.go b/rows_value_scanner.go new file mode 100644 index 0000000..b077755 --- /dev/null +++ b/rows_value_scanner.go @@ -0,0 +1,41 @@ +package pgutil + +type ScanValueFunc[T any] func(Scanner) (T, error) +type MaybeScanValueFunc[T any] func(Scanner) (T, bool, error) + +func newMaybeScanValueFunc[T any](f ScanValueFunc[T]) MaybeScanValueFunc[T] { + return func(s Scanner) (T, bool, error) { + value, err := f(s) + return value, true, err + } +} + +func NewAnyValueScanner[T any]() ScanValueFunc[T] { + return func(s Scanner) (value T, err error) { + err = s.Scan(&value) + return + } +} + +type Collector[T any] struct { + scanner ScanValueFunc[T] + values []T +} + +func NewCollector[T any](scanner ScanValueFunc[T]) *Collector[T] { + return &Collector[T]{ + scanner: scanner, + } +} + +func (c *Collector[T]) Scanner() ScanFunc { + return func(s Scanner) error { + value, err := c.scanner(s) + c.values = append(c.values, value) + return err + } +} + +func (c *Collector[T]) Slice() []T { + return c.values +} diff --git a/rows_value_scanner_test.go b/rows_value_scanner_test.go new file mode 100644 index 0000000..88c831d --- /dev/null +++ b/rows_value_scanner_test.go @@ -0,0 +1,19 @@ +package pgutil + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCollector(t *testing.T) { + db := NewTestDB(t) + collector := NewCollector[int](NewAnyValueScanner[int]()) + scanner := NewRowScanner(collector.Scanner()) + + require.NoError(t, scanner(db.Query(context.Background(), RawQuery(`SELECT * FROM (VALUES (1), (2), (3)) AS t(number)`)))) + require.NoError(t, scanner(db.Query(context.Background(), RawQuery(`SELECT * FROM (VALUES (4), (5), (6)) AS t(number)`)))) + assert.Equal(t, []int{1, 2, 3, 4, 5, 6}, collector.Slice()) +} diff --git a/testdata/golden/TestDescribeSchema.golden b/testdata/golden/TestDescribeSchema.golden new file mode 100644 index 0000000..7cef74d --- /dev/null +++ b/testdata/golden/TestDescribeSchema.golden @@ -0,0 +1,432 @@ +pgutil.SchemaDescription{ + Extensions: []pgutil.ExtensionDescription{ + { + Namespace: "public", + Name: "pg_trgm", + }, + { + Namespace: "public", + Name: "uuid-ossp", + }, + }, + Enums: []pgutil.EnumDescription{ + { + Namespace: "public", + Name: "mood", + Labels: []string{ + "sad", + "ok", + "happy", + }, + }, + { + Namespace: "public", + Name: "weather", + Labels: []string{ + "sunny", + "rainy", + "cloudy", + "snowy", + }, + }, + }, + Functions: []pgutil.FunctionDescription{ + { + Namespace: "public", + Name: "get_random_mood", + Definition: `CREATE OR REPLACE FUNCTION public.get_random_mood() + RETURNS mood + LANGUAGE plpgsql +AS $function$ +BEGIN + RETURN (ARRAY['sad', 'ok', 'happy'])[floor(random() * 3 + 1)]; +END; +$function$ +`, + ArgTypes: []string{}, + }, + { + Namespace: "public", + Name: "get_weather_description", + Definition: `CREATE OR REPLACE FUNCTION public.get_weather_description(w weather) + RETURNS text + LANGUAGE plpgsql +AS $function$ +BEGIN + CASE + WHEN w = 'sunny' THEN + RETURN 'Pack some
SPF!'; + WHEN w = 'rainy' THEN + RETURN 'Bring an umbrella!'; + WHEN w = 'cloudy' THEN + RETURN 'Wear a jacket!'; + WHEN w = 'snowy' THEN + RETURN 'Bundle up!'; + ELSE + RETURN 'Unknown weather'; + END CASE; +END; +$function$ +`, + ArgTypes: []string{"weather"}, + }, + { + Namespace: "public", + Name: "update_last_modified", + Definition: `CREATE OR REPLACE FUNCTION public.update_last_modified() + RETURNS trigger + LANGUAGE plpgsql +AS $function$ +BEGIN + NEW.last_modified = NOW(); + RETURN NEW; +END; +$function$ +`, + ArgTypes: []string{}, + }, + }, + Tables: []pgutil.TableDescription{ + { + Namespace: "public", + Name: "comments", + Columns: []pgutil.ColumnDescription{ + { + Name: "content", + Type: "text", + }, + { + Name: "created_at", + Type: "timestamp with time zone", + IsNullable: true, + Default: "CURRENT_TIMESTAMP", + }, + { + Name: "id", + Type: "uuid", + Default: "uuid_generate_v4()", + }, + { + Name: "post_id", + Type: "uuid", + }, + { + Name: "user_id", + Type: "integer", + }, + }, + Constraints: []pgutil.ConstraintDescription{ + { + Name: "comments_post_id_fkey", + Type: "f", + ReferencedTableName: "posts", + Definition: "FOREIGN KEY (post_id) REFERENCES posts(id) ON DELETE CASCADE", + }, + { + Name: "comments_user_id_fkey", + Type: "f", + ReferencedTableName: "users", + Definition: "FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE", + }, + }, + Indexes: []pgutil.IndexDescription{ + { + Name: "comments_pkey", + IsPrimaryKey: true, + IsUnique: true, + IndexDefinition: "CREATE UNIQUE INDEX comments_pkey ON comments USING btree (id)", + ConstraintType: "p", + ConstraintDefinition: "PRIMARY KEY (id)", + }, + { + Name: "idx_comments_post_id", + IndexDefinition: "CREATE INDEX idx_comments_post_id ON comments USING btree (post_id)", + }, + { + Name: "idx_comments_user_id", + IndexDefinition: "CREATE INDEX idx_comments_user_id ON comments USING btree (user_id)", + }, + }, + }, + { + Namespace: "public", + Name: "posts", + Columns: []pgutil.ColumnDescription{ + { + Name: "content", + Type: "text", + IsNullable: true, + }, + { + Name: "created_at", + Type: "timestamp with time zone", + IsNullable: true, + Default: "CURRENT_TIMESTAMP", + }, + { + Name: "id", + Type: "uuid", + Default: "uuid_generate_v4()", + }, + { + Name: "last_modified", + Type: "timestamp with time zone", + IsNullable: true, + Default: "CURRENT_TIMESTAMP", + }, + { + Name: "title", + Type: "character varying(200)", + CharacterMaximumLength: 200, + }, + { + Name: "user_id", + Type: "integer", + }, + }, + Constraints: []pgutil.ConstraintDescription{{ + Name: "posts_user_id_fkey", + Type: "f", + ReferencedTableName: "users", + Definition: "FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE", + }}, + Indexes: []pgutil.IndexDescription{ + { + Name: "idx_posts_content_trgm", + IndexDefinition: "CREATE INDEX idx_posts_content_trgm ON posts USING gin (content gin_trgm_ops)", + }, + { + Name: "idx_posts_user_id", + IndexDefinition: "CREATE INDEX idx_posts_user_id ON posts USING btree (user_id)", + }, + { + Name: "posts_pkey", + IsPrimaryKey: true, + IsUnique: true, + IndexDefinition: "CREATE UNIQUE INDEX posts_pkey ON posts USING btree (id)", + ConstraintType: "p", + ConstraintDefinition: "PRIMARY KEY (id)", + }, + }, + }, + { + Namespace: "public", + Name: "users", + Columns: []pgutil.ColumnDescription{ + { + Name: "created_at", + Type: "timestamp with time zone", + IsNullable: true, + Default: "CURRENT_TIMESTAMP", + }, + { + Name: "email", + Type: "character varying(100)", + CharacterMaximumLength: 
100, + }, + { + Name: "id", + Type: "integer", + Default: "nextval('user_id_seq'::regclass)", + }, + { + Name: "last_modified", + Type: "timestamp with time zone", + IsNullable: true, + Default: "CURRENT_TIMESTAMP", + }, + { + Name: "mood", + Type: "mood", + IsNullable: true, + }, + { + Name: "password_hash", + Type: "character varying(100)", + CharacterMaximumLength: 100, + }, + { + Name: "username", + Type: "character varying(50)", + CharacterMaximumLength: 50, + }, + }, + Indexes: []pgutil.IndexDescription{ + { + Name: "idx_users_email", + IndexDefinition: "CREATE INDEX idx_users_email ON users USING btree (email)", + }, + { + Name: "idx_users_username", + IndexDefinition: "CREATE INDEX idx_users_username ON users USING btree (username)", + }, + { + Name: "users_email_key", + IsUnique: true, + IndexDefinition: "CREATE UNIQUE INDEX users_email_key ON users USING btree (email)", + ConstraintType: "u", + ConstraintDefinition: "UNIQUE (email)", + }, + { + Name: "users_pkey", + IsPrimaryKey: true, + IsUnique: true, + IndexDefinition: "CREATE UNIQUE INDEX users_pkey ON users USING btree (id)", + ConstraintType: "p", + ConstraintDefinition: "PRIMARY KEY (id)", + }, + { + Name: "users_username_key", + IsUnique: true, + IndexDefinition: "CREATE UNIQUE INDEX users_username_key ON users USING btree (username)", + ConstraintType: "u", + ConstraintDefinition: "UNIQUE (username)", + }, + }, + }, + }, + Sequences: []pgutil.SequenceDescription{{ + Namespace: "public", + Name: "user_id_seq", + Type: "bigint", + StartValue: 1000, + MinimumValue: 1, + MaximumValue: 9223372036854775807, + Increment: 1, + CycleOption: "NO", + }}, + Views: []pgutil.ViewDescription{ + { + Namespace: "public", + Name: "active_users", + Definition: ` SELECT id, + username, + email, + mood + FROM users + WHERE (last_modified > (CURRENT_TIMESTAMP - '30 days'::interval));`, + }, + { + Namespace: "public", + Name: "post_stats", + Definition: ` SELECT p.id AS post_id, + p.title, + p.user_id, + u.username, + count(c.id) AS comment_count + FROM ((posts p + JOIN users u ON ((p.user_id = u.id))) + LEFT JOIN comments c ON ((p.id = c.post_id))) + GROUP BY p.id, p.title, p.user_id, u.username;`, + }, + }, + Triggers: []pgutil.TriggerDescription{ + { + Namespace: "public", + Name: "update_post_last_modified", + TableName: "posts", + FunctionNamespace: "public", + Definition: "CREATE TRIGGER update_post_last_modified BEFORE UPDATE ON posts FOR EACH ROW EXECUTE FUNCTION update_last_modified()", + }, + { + Namespace: "public", + Name: "update_user_last_modified", + TableName: "users", + FunctionNamespace: "public", + Definition: "CREATE TRIGGER update_user_last_modified BEFORE UPDATE ON users FOR EACH ROW EXECUTE FUNCTION update_last_modified()", + }, + }, + EnumDependencies: []pgutil.EnumDependency{{ + EnumNamespace: "public", + EnumName: "mood", + TableNamespace: "public", + TableName: "users", + ColumnName: "mood", + }}, + ColumnDependencies: []pgutil.ColumnDependency{ + { + SourceNamespace: "public", + SourceTableOrViewName: "users", + SourceColumnName: "id", + UsedNamespace: "public", + UsedTableOrView: "active_users", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "users", + SourceColumnName: "username", + UsedNamespace: "public", + UsedTableOrView: "active_users", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "users", + SourceColumnName: "email", + UsedNamespace: "public", + UsedTableOrView: "active_users", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "users", + SourceColumnName: 
"mood", + UsedNamespace: "public", + UsedTableOrView: "active_users", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "users", + SourceColumnName: "last_modified", + UsedNamespace: "public", + UsedTableOrView: "active_users", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "comments", + SourceColumnName: "id", + UsedNamespace: "public", + UsedTableOrView: "post_stats", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "comments", + SourceColumnName: "post_id", + UsedNamespace: "public", + UsedTableOrView: "post_stats", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "posts", + SourceColumnName: "id", + UsedNamespace: "public", + UsedTableOrView: "post_stats", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "posts", + SourceColumnName: "user_id", + UsedNamespace: "public", + UsedTableOrView: "post_stats", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "posts", + SourceColumnName: "title", + UsedNamespace: "public", + UsedTableOrView: "post_stats", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "users", + SourceColumnName: "id", + UsedNamespace: "public", + UsedTableOrView: "post_stats", + }, + { + SourceNamespace: "public", + SourceTableOrViewName: "users", + SourceColumnName: "username", + UsedNamespace: "public", + UsedTableOrView: "post_stats", + }, + }, +} diff --git a/testdata/migrations/cic_in_down_migration/1_first/down.sql b/testdata/migrations/cic_in_down_migration/1_first/down.sql new file mode 100644 index 0000000..2332b61 --- /dev/null +++ b/testdata/migrations/cic_in_down_migration/1_first/down.sql @@ -0,0 +1,2 @@ +-- Drop the sample table +DROP TABLE IF EXISTS users; diff --git a/testdata/migrations/cic_in_down_migration/1_first/up.sql b/testdata/migrations/cic_in_down_migration/1_first/up.sql new file mode 100644 index 0000000..e93c3ac --- /dev/null +++ b/testdata/migrations/cic_in_down_migration/1_first/up.sql @@ -0,0 +1,6 @@ +-- Create a sample table +CREATE TABLE users ( + id SERIAL PRIMARY KEY, + username TEXT NOT NULL, + email TEXT NOT NULL +); diff --git a/testdata/migrations/cic_in_down_migration/2_second/down.sql b/testdata/migrations/cic_in_down_migration/2_second/down.sql new file mode 100644 index 0000000..854c795 --- /dev/null +++ b/testdata/migrations/cic_in_down_migration/2_second/down.sql @@ -0,0 +1,3 @@ +-- Remove the index and drop the column +DROP INDEX IF EXISTS idx_users_created_at; +ALTER TABLE users DROP COLUMN IF EXISTS created_at; diff --git a/testdata/migrations/cic_in_down_migration/2_second/up.sql b/testdata/migrations/cic_in_down_migration/2_second/up.sql new file mode 100644 index 0000000..ae260ef --- /dev/null +++ b/testdata/migrations/cic_in_down_migration/2_second/up.sql @@ -0,0 +1,3 @@ +-- Add a new column and create an index +ALTER TABLE users ADD COLUMN created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(); +CREATE INDEX idx_users_created_at ON users(created_at); diff --git a/testdata/migrations/cic_in_down_migration/3_third/down.sql b/testdata/migrations/cic_in_down_migration/3_third/down.sql new file mode 100644 index 0000000..55048d7 --- /dev/null +++ b/testdata/migrations/cic_in_down_migration/3_third/down.sql @@ -0,0 +1,2 @@ +-- Recreate the index using CREATE INDEX CONCURRENTLY +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_created_at ON users(created_at); diff --git a/testdata/migrations/cic_in_down_migration/3_third/up.sql b/testdata/migrations/cic_in_down_migration/3_third/up.sql new file mode 100644 index 0000000..d48ebc4 --- 
/dev/null +++ b/testdata/migrations/cic_in_down_migration/3_third/up.sql @@ -0,0 +1,2 @@ +-- Drop the index created in the second migration +DROP INDEX IF EXISTS idx_users_created_at; diff --git a/testdata/migrations/cic_pattern/1_first/down.sql b/testdata/migrations/cic_pattern/1_first/down.sql new file mode 100755 index 0000000..26ce8a7 --- /dev/null +++ b/testdata/migrations/cic_pattern/1_first/down.sql @@ -0,0 +1,2 @@ +-- Drop the users table +DROP TABLE IF EXISTS users; diff --git a/testdata/migrations/cic_pattern/1_first/up.sql b/testdata/migrations/cic_pattern/1_first/up.sql new file mode 100755 index 0000000..14d93e8 --- /dev/null +++ b/testdata/migrations/cic_pattern/1_first/up.sql @@ -0,0 +1,7 @@ +-- Create the users table +CREATE TABLE users ( + id SERIAL PRIMARY KEY, + username TEXT NOT NULL UNIQUE, + email TEXT NOT NULL UNIQUE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); diff --git a/testdata/migrations/cic_pattern/2_second/down.sql b/testdata/migrations/cic_pattern/2_second/down.sql new file mode 100755 index 0000000..d032b51 --- /dev/null +++ b/testdata/migrations/cic_pattern/2_second/down.sql @@ -0,0 +1,2 @@ +-- Drop the posts table +DROP TABLE IF EXISTS posts; diff --git a/testdata/migrations/cic_pattern/2_second/up.sql b/testdata/migrations/cic_pattern/2_second/up.sql new file mode 100755 index 0000000..309d840 --- /dev/null +++ b/testdata/migrations/cic_pattern/2_second/up.sql @@ -0,0 +1,8 @@ +-- Create the posts table +CREATE TABLE posts ( + id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, + title TEXT NOT NULL, + content TEXT, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); diff --git a/testdata/migrations/cic_pattern/3_third/down.sql b/testdata/migrations/cic_pattern/3_third/down.sql new file mode 100755 index 0000000..549f08a --- /dev/null +++ b/testdata/migrations/cic_pattern/3_third/down.sql @@ -0,0 +1,2 @@ +-- Drop the comments table +DROP TABLE IF EXISTS comments; diff --git a/testdata/migrations/cic_pattern/3_third/up.sql b/testdata/migrations/cic_pattern/3_third/up.sql new file mode 100755 index 0000000..e83b493 --- /dev/null +++ b/testdata/migrations/cic_pattern/3_third/up.sql @@ -0,0 +1,8 @@ +-- Create the comments table +CREATE TABLE comments ( + id SERIAL PRIMARY KEY, + post_id INTEGER NOT NULL REFERENCES posts(id) ON DELETE CASCADE, + user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, + content TEXT NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); diff --git a/testdata/migrations/cic_pattern/4_fourth/down.sql b/testdata/migrations/cic_pattern/4_fourth/down.sql new file mode 100644 index 0000000..ba07e2b --- /dev/null +++ b/testdata/migrations/cic_pattern/4_fourth/down.sql @@ -0,0 +1,2 @@ +-- Drop the concurrent index +DROP INDEX CONCURRENTLY IF EXISTS idx_users_email; diff --git a/testdata/migrations/cic_pattern/4_fourth/up.sql b/testdata/migrations/cic_pattern/4_fourth/up.sql new file mode 100644 index 0000000..dbacb99 --- /dev/null +++ b/testdata/migrations/cic_pattern/4_fourth/up.sql @@ -0,0 +1,2 @@ +-- Create a concurrent index +CREATE INDEX CONCURRENTLY idx_users_email ON users (email); diff --git a/testdata/migrations/cic_with_additional_queries/1_first/down.sql b/testdata/migrations/cic_with_additional_queries/1_first/down.sql new file mode 100755 index 0000000..26ce8a7 --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/1_first/down.sql @@ -0,0 +1,2 @@ +-- Drop the users table +DROP TABLE IF EXISTS users; diff --git 
a/testdata/migrations/cic_with_additional_queries/1_first/up.sql b/testdata/migrations/cic_with_additional_queries/1_first/up.sql new file mode 100755 index 0000000..14d93e8 --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/1_first/up.sql @@ -0,0 +1,7 @@ +-- Create the users table +CREATE TABLE users ( + id SERIAL PRIMARY KEY, + username TEXT NOT NULL UNIQUE, + email TEXT NOT NULL UNIQUE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); diff --git a/testdata/migrations/cic_with_additional_queries/2_second/down.sql b/testdata/migrations/cic_with_additional_queries/2_second/down.sql new file mode 100755 index 0000000..d032b51 --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/2_second/down.sql @@ -0,0 +1,2 @@ +-- Drop the posts table +DROP TABLE IF EXISTS posts; diff --git a/testdata/migrations/cic_with_additional_queries/2_second/up.sql b/testdata/migrations/cic_with_additional_queries/2_second/up.sql new file mode 100755 index 0000000..309d840 --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/2_second/up.sql @@ -0,0 +1,8 @@ +-- Create the posts table +CREATE TABLE posts ( + id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, + title TEXT NOT NULL, + content TEXT, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); diff --git a/testdata/migrations/cic_with_additional_queries/3_third/down.sql b/testdata/migrations/cic_with_additional_queries/3_third/down.sql new file mode 100755 index 0000000..549f08a --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/3_third/down.sql @@ -0,0 +1,2 @@ +-- Drop the comments table +DROP TABLE IF EXISTS comments; diff --git a/testdata/migrations/cic_with_additional_queries/3_third/up.sql b/testdata/migrations/cic_with_additional_queries/3_third/up.sql new file mode 100755 index 0000000..e83b493 --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/3_third/up.sql @@ -0,0 +1,8 @@ +-- Create the comments table +CREATE TABLE comments ( + id SERIAL PRIMARY KEY, + post_id INTEGER NOT NULL REFERENCES posts(id) ON DELETE CASCADE, + user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, + content TEXT NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); diff --git a/testdata/migrations/cic_with_additional_queries/4_fourth/down.sql b/testdata/migrations/cic_with_additional_queries/4_fourth/down.sql new file mode 100644 index 0000000..be079dc --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/4_fourth/down.sql @@ -0,0 +1,6 @@ +-- Drop new indexes +DROP INDEX idx_users_created_at; +DROP INDEX idx_users_email; + +-- Drop new column +ALTER TABLE users DROP COLUMN last_login; diff --git a/testdata/migrations/cic_with_additional_queries/4_fourth/up.sql b/testdata/migrations/cic_with_additional_queries/4_fourth/up.sql new file mode 100644 index 0000000..7b16a34 --- /dev/null +++ b/testdata/migrations/cic_with_additional_queries/4_fourth/up.sql @@ -0,0 +1,9 @@ +-- Add and backfill last_login column +ALTER TABLE users ADD COLUMN last_login TIMESTAMP; +UPDATE users SET last_login = NOW() WHERE email IN ('user1@example.com', 'user2@example.com'); + +-- Create an index concurrently +CREATE INDEX CONCURRENTLY idx_users_email ON users (email); + +-- Create another index +CREATE INDEX idx_users_created_at ON users (created_at); diff --git a/testdata/migrations/duplicate_identifiers/1_first/down.sql b/testdata/migrations/duplicate_identifiers/1_first/down.sql new file mode 100755 index 0000000..26ce8a7 --- /dev/null
+++ b/testdata/migrations/duplicate_identifiers/1_first/down.sql
@@ -0,0 +1,2 @@
+-- Drop the users table
+DROP TABLE IF EXISTS users;
diff --git a/testdata/migrations/duplicate_identifiers/1_first/up.sql b/testdata/migrations/duplicate_identifiers/1_first/up.sql
new file mode 100755
index 0000000..14d93e8
--- /dev/null
+++ b/testdata/migrations/duplicate_identifiers/1_first/up.sql
@@ -0,0 +1,7 @@
+-- Create the users table
+CREATE TABLE users (
+    id SERIAL PRIMARY KEY,
+    username TEXT NOT NULL UNIQUE,
+    email TEXT NOT NULL UNIQUE,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
+);
diff --git a/testdata/migrations/duplicate_identifiers/2_second/down.sql b/testdata/migrations/duplicate_identifiers/2_second/down.sql
new file mode 100755
index 0000000..d032b51
--- /dev/null
+++ b/testdata/migrations/duplicate_identifiers/2_second/down.sql
@@ -0,0 +1,2 @@
+-- Drop the posts table
+DROP TABLE IF EXISTS posts;
diff --git a/testdata/migrations/duplicate_identifiers/2_second/up.sql b/testdata/migrations/duplicate_identifiers/2_second/up.sql
new file mode 100755
index 0000000..309d840
--- /dev/null
+++ b/testdata/migrations/duplicate_identifiers/2_second/up.sql
@@ -0,0 +1,8 @@
+-- Create the posts table
+CREATE TABLE posts (
+    id SERIAL PRIMARY KEY,
+    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    title TEXT NOT NULL,
+    content TEXT,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
+);
diff --git a/testdata/migrations/duplicate_identifiers/2_third/down.sql b/testdata/migrations/duplicate_identifiers/2_third/down.sql
new file mode 100755
index 0000000..549f08a
--- /dev/null
+++ b/testdata/migrations/duplicate_identifiers/2_third/down.sql
@@ -0,0 +1,2 @@
+-- Drop the comments table
+DROP TABLE IF EXISTS comments;
diff --git a/testdata/migrations/duplicate_identifiers/2_third/up.sql b/testdata/migrations/duplicate_identifiers/2_third/up.sql
new file mode 100755
index 0000000..e83b493
--- /dev/null
+++ b/testdata/migrations/duplicate_identifiers/2_third/up.sql
@@ -0,0 +1,8 @@
+-- Create the comments table
+CREATE TABLE comments (
+    id SERIAL PRIMARY KEY,
+    post_id INTEGER NOT NULL REFERENCES posts(id) ON DELETE CASCADE,
+    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    content TEXT NOT NULL,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
+);
diff --git a/testdata/migrations/valid/1_first/down.sql b/testdata/migrations/valid/1_first/down.sql
new file mode 100755
index 0000000..26ce8a7
--- /dev/null
+++ b/testdata/migrations/valid/1_first/down.sql
@@ -0,0 +1,2 @@
+-- Drop the users table
+DROP TABLE IF EXISTS users;
diff --git a/testdata/migrations/valid/1_first/up.sql b/testdata/migrations/valid/1_first/up.sql
new file mode 100755
index 0000000..14d93e8
--- /dev/null
+++ b/testdata/migrations/valid/1_first/up.sql
@@ -0,0 +1,7 @@
+-- Create the users table
+CREATE TABLE users (
+    id SERIAL PRIMARY KEY,
+    username TEXT NOT NULL UNIQUE,
+    email TEXT NOT NULL UNIQUE,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
+);
diff --git a/testdata/migrations/valid/2_second/down.sql b/testdata/migrations/valid/2_second/down.sql
new file mode 100755
index 0000000..d032b51
--- /dev/null
+++ b/testdata/migrations/valid/2_second/down.sql
@@ -0,0 +1,2 @@
+-- Drop the posts table
+DROP TABLE IF EXISTS posts;
diff --git a/testdata/migrations/valid/2_second/up.sql b/testdata/migrations/valid/2_second/up.sql
new file mode 100755
index 0000000..309d840
--- /dev/null
+++ b/testdata/migrations/valid/2_second/up.sql
@@ -0,0 +1,8 @@
+-- Create the posts table
+CREATE TABLE posts (
+    id SERIAL PRIMARY KEY,
+    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    title TEXT NOT NULL,
+    content TEXT,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
+);
diff --git a/testdata/migrations/valid/3_third/down.sql b/testdata/migrations/valid/3_third/down.sql
new file mode 100755
index 0000000..549f08a
--- /dev/null
+++ b/testdata/migrations/valid/3_third/down.sql
@@ -0,0 +1,2 @@
+-- Drop the comments table
+DROP TABLE IF EXISTS comments;
diff --git a/testdata/migrations/valid/3_third/up.sql b/testdata/migrations/valid/3_third/up.sql
new file mode 100755
index 0000000..e83b493
--- /dev/null
+++ b/testdata/migrations/valid/3_third/up.sql
@@ -0,0 +1,8 @@
+-- Create the comments table
+CREATE TABLE comments (
+    id SERIAL PRIMARY KEY,
+    post_id INTEGER NOT NULL REFERENCES posts(id) ON DELETE CASCADE,
+    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    content TEXT NOT NULL,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
+);
diff --git a/testdata/schemas/describe.sql b/testdata/schemas/describe.sql
new file mode 100644
index 0000000..7754083
--- /dev/null
+++ b/testdata/schemas/describe.sql
@@ -0,0 +1,110 @@
+-- Extensions
+CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+CREATE EXTENSION IF NOT EXISTS "pg_trgm";
+
+-- Enums
+CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');
+CREATE TYPE weather AS ENUM ('sunny', 'rainy', 'cloudy', 'snowy');
+
+-- Functions
+CREATE OR REPLACE FUNCTION get_random_mood() RETURNS mood AS $$
+BEGIN
+    RETURN (ARRAY['sad', 'ok', 'happy'])[floor(random() * 3 + 1)];
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION get_weather_description(w weather) RETURNS TEXT AS $$
+BEGIN
+    CASE
+        WHEN w = 'sunny' THEN
+            RETURN 'Pack some SPF!';
+        WHEN w = 'rainy' THEN
+            RETURN 'Bring an umbrella!';
+        WHEN w = 'cloudy' THEN
+            RETURN 'Wear a jacket!';
+        WHEN w = 'snowy' THEN
+            RETURN 'Bundle up!';
+        ELSE
+            RETURN 'Unknown weather';
+    END CASE;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION update_last_modified() RETURNS TRIGGER AS $$
+BEGIN
+    NEW.last_modified = NOW();
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Sequences
+CREATE SEQUENCE IF NOT EXISTS user_id_seq START 1000;
+
+-- Tables
+CREATE TABLE users (
+    id INTEGER PRIMARY KEY DEFAULT nextval('user_id_seq'),
+    username VARCHAR(50) UNIQUE NOT NULL,
+    email VARCHAR(100) UNIQUE NOT NULL,
+    password_hash VARCHAR(100) NOT NULL,
+    mood mood,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+    last_modified TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE posts (
+    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    title VARCHAR(200) NOT NULL,
+    content TEXT,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+    last_modified TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE comments (
+    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+    post_id UUID NOT NULL REFERENCES posts(id) ON DELETE CASCADE,
+    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    content TEXT NOT NULL,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Views
+CREATE VIEW active_users AS
+SELECT id, username, email, mood
+FROM users
+WHERE last_modified > CURRENT_TIMESTAMP - INTERVAL '30 days';
+
+CREATE VIEW post_stats AS
+SELECT p.id AS post_id, p.title, p.user_id, u.username, COUNT(c.id) AS comment_count
+FROM posts p
+JOIN users u ON p.user_id = u.id
+LEFT JOIN comments c ON p.id = c.post_id
+GROUP BY p.id, p.title, p.user_id, u.username;
+
+-- Triggers
+CREATE TRIGGER update_user_last_modified
+BEFORE UPDATE ON users
+FOR EACH ROW
+EXECUTE FUNCTION update_last_modified();
+
+CREATE TRIGGER update_post_last_modified
+BEFORE UPDATE ON posts
+FOR EACH ROW
+EXECUTE FUNCTION update_last_modified();
+
+-- Indexes
+CREATE INDEX idx_users_username ON users USING btree (username);
+CREATE INDEX idx_users_email ON users USING btree (email);
+CREATE INDEX idx_posts_user_id ON posts USING btree (user_id);
+CREATE INDEX idx_comments_post_id ON comments USING btree (post_id);
+CREATE INDEX idx_comments_user_id ON comments USING btree (user_id);
+
+-- Trigram index for fuzzy text search
+CREATE INDEX idx_posts_content_trgm ON posts USING gin (content gin_trgm_ops);
+
+-- EnumDependencies
+-- The 'mood' enum is used in the 'users' table
+
+-- ColumnDependencies
+-- The 'users.id' column is referenced by 'posts.user_id' and 'comments.user_id'
+-- The 'posts.id' column is referenced by 'comments.post_id'
diff --git a/testing.go b/testing.go
new file mode 100644
index 0000000..a3837fe
--- /dev/null
+++ b/testing.go
@@ -0,0 +1,69 @@
+package pgutil
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"net/url"
+	"os"
+	"testing"
+
+	"github.com/go-nacelle/log/v2"
+	"github.com/lib/pq"
+	"github.com/stretchr/testify/require"
+)
+
+func NewTestDB(t testing.TB) DB {
+	return NewTestDBWithLogger(t, log.NewNilLogger())
+}
+
+func NewTestDBWithLogger(t testing.TB, logger log.Logger) DB {
+	t.Helper()
+
+	id, err := randomHexString(16)
+	require.NoError(t, err)
+
+	var (
+		testDatabaseName           = fmt.Sprintf("pgutil-test-%s", id)
+		quotedTestDatabaseName     = pq.QuoteIdentifier(testDatabaseName)
+		quotedTemplateDatabaseName = pq.QuoteIdentifier(os.Getenv("TEMPLATEDB"))
+
+		// NOTE: Must interpolate identifiers here as placeholders aren't valid in this position.
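+		// For example, Postgres rejects `CREATE DATABASE $1`: utility statements
+		// such as CREATE/DROP DATABASE accept no bind parameters, so identifiers
+		// are quoted with pq.QuoteIdentifier above and spliced into the query text.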
+		createDatabaseQuery       = queryf("CREATE DATABASE %s TEMPLATE %s", quotedTestDatabaseName, quotedTemplateDatabaseName)
+		dropDatabaseQuery         = queryf("DROP DATABASE %s", quotedTestDatabaseName)
+		terminateConnectionsQuery = Query("SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = {:name}", Args{"name": testDatabaseName})
+	)
+
+	// Resolve "control" database URL
+	baseURL := BuildDatabaseURL()
+	parsedURL, err := url.Parse(baseURL)
+	require.NoError(t, err)
+
+	// Resolve "test" database URL
+	testDBURL := parsedURL.ResolveReference(&url.URL{
+		Path:     "/" + testDatabaseName,
+		RawQuery: parsedURL.RawQuery,
+	})
+
+	// Open "control" database
+	rawDB, err := sql.Open("postgres", baseURL)
+	require.NoError(t, err)
+	rawLoggingDB := newLoggingDB(rawDB, log.NewNilLogger())
+
+	// Create "test" database
+	require.NoError(t, rawLoggingDB.Exec(context.Background(), createDatabaseQuery))
+
+	// Open "test" database
+	testDB, err := sql.Open("postgres", testDBURL.String())
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		defer rawDB.Close()
+
+		require.NoError(t, testDB.Close())
+		require.NoError(t, rawLoggingDB.Exec(context.Background(), terminateConnectionsQuery))
+		require.NoError(t, rawLoggingDB.Exec(context.Background(), dropDatabaseQuery))
+	})
+
+	return newLoggingDB(testDB, logger)
+}
diff --git a/url.go b/url.go
new file mode 100644
index 0000000..e01e27a
--- /dev/null
+++ b/url.go
@@ -0,0 +1,35 @@
+package pgutil
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+)
+
+func BuildDatabaseURL() string {
+	var (
+		host     = getEnvOrDefault("PGHOST", "localhost")
+		port     = getEnvOrDefault("PGPORT", "5432")
+		user     = getEnvOrDefault("PGUSER", "")
+		password = getEnvOrDefault("PGPASSWORD", "")
+		database = getEnvOrDefault("PGDATABASE", "")
+		sslmode  = getEnvOrDefault("PGSSLMODE", "disable")
+	)
+
+	u := &url.URL{
+		Scheme:   "postgres",
+		Host:     fmt.Sprintf("%s:%s", host, port),
+		User:     url.UserPassword(user, password),
+		Path:     database,
+		RawQuery: url.Values{"sslmode": []string{sslmode}}.Encode(),
+	}
+	return u.String()
+}
+
+func getEnvOrDefault(key, defaultValue string) string {
+	if value := os.Getenv(key); value != "" {
+		return value
+	}
+
+	return defaultValue
+}
diff --git a/util.go b/util.go
new file mode 100644
index 0000000..36477a9
--- /dev/null
+++ b/util.go
@@ -0,0 +1,18 @@
+package pgutil
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"io"
+)
+
+// randomHexString returns a string of 2*n hex characters derived from n
+// cryptographically-secure random bytes.
+func randomHexString(n int) (string, error) {
+	b := make([]byte, n)
+	if _, err := io.ReadFull(rand.Reader, b); err != nil {
+		return "", err
+	}
+
+	return hex.EncodeToString(b), nil
+}
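The helpers above compose into a per-test database cloned from a template. Below is a minimal sketch of a consuming test, assuming the standard PG* environment variables point at a reachable server, TEMPLATEDB names a database that already contains the schema under test, and the module is importable as github.com/go-nacelle/pgutil; the import path, test name, and users table are illustrative assumptions, while the Exec and Query/Args signatures match the ones used in testing.go.

	package pgutil_test

	import (
		"context"
		"testing"

		"github.com/go-nacelle/pgutil"
		"github.com/stretchr/testify/require"
	)

	func TestInsertUser(t *testing.T) {
		// Creates pgutil-test-<random hex> from $TEMPLATEDB; the registered
		// cleanup terminates lingering connections and drops the database
		// when the test finishes.
		db := pgutil.NewTestDB(t)

		// {:name} placeholders follow the same Query/Args form used by
		// terminateConnectionsQuery in testing.go.
		err := db.Exec(context.Background(), pgutil.Query(
			"INSERT INTO users (username, email) VALUES ({:username}, {:email})",
			pgutil.Args{"username": "alice", "email": "alice@example.com"},
		))
		require.NoError(t, err)
	}

Because each test gets its own database cloned from the template, tests can run in parallel without coordinating on shared tables.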