Compare commits

10 commits: 11def45666 ... e659d04ebe

| SHA1 |
|---|
| e659d04ebe |
| bd9103f359 |
| 56383939ed |
| bd54486fb3 |
| d08f5f55aa |
| 02544da725 |
| 062d1f70be |
| 73098499dd |
| 6da0fcb7be |
| 62c53a6b4d |
.github/workflows/test.yml (vendored, 7 changes)

@@ -27,10 +27,5 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-go-

-      - name: Start containers
-        run: |
-          docker-compose up -d
-          sleep 3
-
       - name: Test
-        run: go test -p 1 ./...
+        run: make test
.gitignore (vendored, 3 changes)

@@ -1 +1,2 @@
-.idea
+data.db*
+
.vscode/launch.json (vendored, new file, 31 changes)

@@ -0,0 +1,31 @@
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Launch main",
+            "type": "go",
+            "request": "launch",
+            "mode": "debug",
+            "program": "${workspaceFolder}/cmd/web/main.go"
+        },
+        {
+            "name": "Launch test function",
+            "type": "go",
+            "request": "launch",
+            "mode": "test",
+            "program": "${workspaceFolder}/pkg",
+            "args": [
+                "-test.run",
+                "FindAllSubStr"
+            ]
+        },
+        {
+            "name": "Test Current File",
+            "type": "go",
+            "request": "launch",
+            "mode": "test",
+            "program": "./${relativeFileDirname}",
+            "showLog": true
+        }
+    ]
+}
Makefile (81 changes)

@@ -1,72 +1,6 @@
-# Determine if you have docker-compose or docker compose installed locally
-# If this does not work on your system, just set the name of the executable you have installed
-DCO_BIN := $(shell { command -v docker-compose || command -v docker compose; } 2>/dev/null)
-
-# Connect to the primary database
-.PHONY: db
-db:
-    docker exec -it pagoda_db psql postgresql://admin:admin@localhost:5432/app
-
-# Connect to the test database (you must run tests first before running this)
-.PHONY: db-test
-db-test:
-    docker exec -it pagoda_db psql postgresql://admin:admin@localhost:5432/app_test
-
-# Connect to the primary cache
-.PHONY: cache
-cache:
-    docker exec -it pagoda_cache redis-cli
-
-# Clear the primary cache
-.PHONY: cache-clear
-cache-clear:
-    docker exec -it pagoda_cache redis-cli flushall
-
-# Connect to the test cache
-.PHONY: cache-test
-cache-test:
-    docker exec -it pagoda_cache redis-cli -n 1
-
-# Install Ent code-generation module
-.PHONY: ent-install
-ent-install:
-    go get -d entgo.io/ent/cmd/ent
-
-# Generate Ent code
-.PHONY: ent-gen
-ent-gen:
-    go generate ./ent
-
-# Create a new Ent entity
-.PHONY: ent-new
-ent-new:
-    go run entgo.io/ent/cmd/ent new $(name)
-
-# Start the Docker containers
-.PHONY: up
-up:
-    $(DCO_BIN) up -d
-    sleep 3
-
-# Stop the Docker containers
-.PHONY: stop
-stop:
-    $(DCO_BIN) stop
-
-# Drop the Docker containers to wipe all data
-.PHONY: down
-down:
-    $(DCO_BIN) down
-
-# Rebuild Docker containers to wipe all data
-.PHONY: reset
-reset:
-    $(DCO_BIN) down
-    make up
-
 # Run the application
 .PHONY: run
-run:
+run: sqlc templ
     clear
     go run cmd/web/main.go

@@ -75,11 +9,14 @@ run:
 test:
     go test -count=1 -p 1 ./...

-# Run the worker
-.PHONY: worker
-worker:
-    clear
-    go run cmd/worker/main.go
+.PHONY: sqlc
+sqlc:
+    rm -f pkg/models/sqlc/*
+    sqlc generate
+
+.PHONY: templ
+templ:
+    templ generate

 # Check for direct dependency updates
 .PHONY: check-updates
README.md (288 changes)

@@ -6,6 +6,7 @@
 [](https://pkg.go.dev/github.com/mikestefanello/pagoda)
 [](https://go.dev)
 [](https://github.com/avelino/awesome-go)
+[](https://gophers.slack.com/messages/pagoda)

 <p align="center"><img alt="Logo" src="https://user-images.githubusercontent.com/552328/147838644-0efac538-a97e-4a46-86a0-41e3abdf9f20.png" height="200px"/></p>

@@ -21,7 +22,6 @@
 * [Dependencies](#dependencies)
 * [Start the application](#start-the-application)
 * [Running tests](#running-tests)
-* [Clients](#clients)
 * [Service container](#service-container)
 * [Dependency injection](#dependency-injection)
 * [Test dependencies](#test-dependencies)
@@ -31,9 +31,7 @@
 * [Database](#database)
 * [Auto-migrations](#auto-migrations)
 * [Separate test database](#separate-test-database)
-* [ORM](#orm)
-* [Entity types](#entity-types)
-* [New entity type](#new-entity-type)
+* [SQLC](#sqlc)
 * [Sessions](#sessions)
 * [Encryption](#encryption)
 * [Authentication](#authentication)
@@ -82,9 +80,8 @@
 * [Flush tags](#flush-tags)
 * [Tasks](#tasks)
 * [Queues](#queues)
-* [Scheduled tasks](#scheduled-tasks)
-* [Worker](#worker)
-* [Monitoring](#monitoring)
+* [Runner](#runner)
+* [Cron](#cron)
 * [Static files](#static-files)
 * [Cache control headers](#cache-control-headers)
 * [Cache-buster](#cache-buster)
@@ -123,8 +120,9 @@ Go server-side rendered HTML combined with the projects below enable you to crea

 #### Storage

-- [PostgreSQL](https://www.postgresql.org/): The world's most advanced open source relational database.
-- [Redis](https://redis.io/): In-memory data structure store, used as a database, cache, and message broker.
+- [SQLite](https://sqlite.org/): A small, fast, self-contained, high-reliability, full-featured, SQL database engine and the most used database engine in the world.

+Originally, Postgres and Redis were chosen as defaults but since the aim of this project is rapid, simple development, it was changed to SQLite which now provides the primary data storage as well as persistent, background [task queues](#tasks). For [caching](#cache), a simple in-memory solution is provided. If you need to use something like Postgres or Redis, swapping those in can be done quickly and easily. For reference, [this branch](https://github.com/mikestefanello/pagoda/tree/postgres-redis) contains the code that included those (but is no longer maintained).
+
 ### Screenshots

@@ -144,40 +142,27 @@ Go server-side rendered HTML combined with the projects below enable you to crea

 ### Dependencies

-Ensure the following are installed on your system:
-
-- [Go](https://go.dev/)
-- [Docker](https://www.docker.com/)
-- [Docker Compose](https://docs.docker.com/compose/install/)
+Ensure that [Go](https://go.dev/) is installed on your system.

 ### Start the application

-After checking out the repository, from within the root, start the Docker containers for the database and cache by executing `make up`:
+After checking out the repository, from within the root, simply run `make run`:

 ```
 git clone git@github.com:mikestefanello/pagoda.git
 cd pagoda
-make up
+make run
 ```

 Since this repository is a _template_ and not a Go _library_, you **do not** use `go get`.

-Once that completes, you can start the application by executing `make run`. By default, you should be able to access the application in your browser at `localhost:8000`.
+By default, you should be able to access the application in your browser at `localhost:8000`. This can be changed via the [configuration](#configuration).

-If you ever want to quickly drop the Docker containers and restart them in order to wipe all data, execute `make reset`.
+By default, your data will be stored within the `dbs` directory. If you ever want to quickly delete all data, just remove this directory.

 ### Running tests

-To run all tests in the application, execute `make test`. This ensures that the tests from each package are not run in parallel. This is required since many packages contain tests that connect to the test database which is dropped and recreated automatically for each package.
+To run all tests in the application, execute `make test`. This ensures that the tests from each package are not run in parallel. This is required since many packages contain tests that connect to the test database which is stored in memory and reset automatically for each package.

-### Clients
-
-The following _make_ commands are available to make it easy to connect to the database and cache.
-
-- `make db`: Connects to the primary database
-- `make db-test`: Connects to the test database
-- `make cache`: Connects to the primary cache
-- `make cache-test`: Connects to the test cache
-
 ## Service container

@@ -186,7 +171,6 @@ The container is located at `pkg/services/container.go` and is meant to house al
 - Configuration
 - Cache
 - Database
-- ORM
 - Web
 - Validator
 - Authentication
@@ -198,7 +182,7 @@ A new container can be created and initialized via `services.NewContainer()`. It

 ### Dependency injection

-The container exists to faciliate easy dependency-injection both for services within the container as well as areas of your application that require any of these dependencies. For example, the container is automatically passed to the `Init()` method of your route handlers so that the handlers have full, easy access to all services.
+The container exists to facilitate easy dependency injection, both for services within the container as well as areas of your application that require any of these dependencies. For example, the container is automatically passed to the `Init()` method of your route [handlers](#handlers) so that the handlers have full, easy access to all services.

 ### Test dependencies

@@ -217,11 +201,11 @@ Leveraging the functionality of [viper](https://github.com/spf13/viper) to manag
 In `config/config.go`, the prefix is set as `pagoda` via `viper.SetEnvPrefix("pagoda")`. Nested fields require an underscore between levels. For example:

 ```yaml
-cache:
+http:
   port: 1234
 ```

-can be overridden by setting an environment variable with the name `PAGODA_CACHE_PORT`.
+can be overridden by setting an environment variable with the name `PAGODA_HTTP_PORT`.

 ### Environments

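As an illustration (not part of the diff), the mapping between nested config keys and environment variables comes from viper's standard env handling, roughly as sketched below; the exact setup lives in `config/config.go` and may differ in detail.

```go
// Minimal sketch of the env override behavior (assumed to mirror config/config.go).
viper.SetEnvPrefix("pagoda")
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()

// With PAGODA_HTTP_PORT=1234 exported, the nested "http.port" key resolves to 1234:
port := viper.GetInt("http.port")
fmt.Println(port)
```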
@@ -251,55 +235,28 @@ func TestMain(m *testing.M) {

 ## Database

-The database currently used is [PostgreSQL](https://www.postgresql.org/) but you are free to use whatever you prefer. If you plan to continue using [Ent](https://entgo.io/), the incredible ORM, you can check their supported databases [here](https://entgo.io/docs/dialects). The database-driver and client is provided by [pgx](https://github.com/jackc/pgx/tree/v4) and included in the `Container`.
+The database currently used is [SQLite](https://sqlite.org/) but you are free to use whatever you prefer. The database driver is provided by [go-sqlite3](https://github.com/mattn/go-sqlite3). A reference to the database is included in the `Container` if direct access is required.

 Database configuration can be found and managed within the `config` package.

 ### Auto-migrations

-[Ent](https://entgo.io/) provides automatic migrations which are executed on the database whenever the `Container` is created, which means they will run when the application starts.
+[Golang Migrate](https://github.com/golang-migrate/migrate) provides automatic migrations on the database whenever the application starts. Migrations are written in the `db/migrations` directory, which must be present at runtime in the container.

 ### Separate test database

-Since many tests can require a database, this application supports a separate database specifically for tests. Within the `config`, the test database name can be specified at `Config.Database.TestDatabase`.
+Since many tests can require a database, this application supports a separate database specifically for tests. Within the `config`, the test database can be specified at `Config.Database.TestConnection`, which is the database connection string that will be used. By default, this will be an in-memory SQLite database.

-When a `Container` is created, if the [environment](#environments) is set to `config.EnvTest`, the database client will connect to the test database instead, drop the database, recreate it, and run migrations so your tests start with a clean, ready-to-go database. Another benefit is that after the tests execute in a given package, you can connect to the test database to audit the data which can be useful for debugging.
+When a `Container` is created, if the [environment](#environments) is set to `config.EnvTest`, the database client will connect to the test database instead and run migrations so your tests start with a clean, ready-to-go database.

-## ORM
+When this project was using Postgres, it would automatically drop and recreate the test database. Since the current default is in-memory, that is no longer needed. If you decide to use a test database that is not in-memory, you can alter the `Container` initialization code to do this for you.

-As previously mentioned, [Ent](https://entgo.io/) is the supplied ORM. It can swapped out, but I highly recommend it. I don't think there is anything comparable for Go, at the current time. If you're not familiar with Ent, take a look through their top-notch [documentation](https://entgo.io/docs/getting-started).
+## SQLC

-An Ent client is included in the `Container` to provide easy access to the ORM throughout the application.
+Database interactions are handled using [sqlc](https://sqlc.dev) by writing raw SQL queries in the `db/queries` directory and then generating the necessary boilerplate code with `make sqlc`. This creates functions available on the `DBClient.C` object for each written query.

-Ent relies on code-generation for the entities you create to provide robust, type-safe data operations. Everything within the `ent` package in this repository is generated code for the two entity types listed below with the exception of the schema declaration.
+If you want to group business logic for the database into functions beyond what can be performed in a single SQL query, you can create a sub-client, such as the example `DBUserClient`, and attach methods which have access to the database.

-### Entity types
-
-The two included entity types are:
-- User
-- PasswordToken
-
-### New entity type
-
-While you should refer to their [documentation](https://entgo.io/docs/getting-started) for detailed usage, it's helpful to understand how to create an entity type and generate code. To make this easier, the `Makefile` contains some helpers.
-
-1. Ensure all Ent code is downloaded by executing `make ent-install`.
-2. Create the new entity type by executing `make ent-new name=User` where `User` is the name of the entity type. This will generate a file like you can see in `ent/schema/user.go` though the `Fields()` and `Edges()` will be left empty.
-3. Populate the `Fields()` and optionally the `Edges()` (which are the relationships to other entity types).
-4. When done, generate all code by executing `make ent-gen`.
-
-The generated code is extremely flexible and impressive. An example to highlight this is one used within this application:
-
-```go
-entity, err := c.ORM.PasswordToken.
-    Query().
-    Where(passwordtoken.ID(tokenID)).
-    Where(passwordtoken.HasUserWith(user.ID(userID))).
-    Where(passwordtoken.CreatedAtGTE(expiration)).
-    Only(ctx.Request().Context())
-```
-
-This executes a database query to return the _password token_ entity with a given ID that belong to a user with a given ID and has a _created at_ timestamp field that is greater than or equal to a given time.
-
 ## Sessions

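As a concrete illustration (not part of the diff), calling one of the generated queries through the `DBClient.C` accessor described above might look like the following; the query name comes from `db/queries/users.sql` in this change set, while the surrounding handler code and generated field names are assumptions.

```go
// Hypothetical usage inside a handler, after `make sqlc` has generated the query code.
// c is the services.Container; c.DB.C is assumed to expose one method per "-- name:" annotation.
u, err := c.DB.C.GetUserByEmail(ctx.Request().Context(), "someone@example.com")
if err != nil {
    return echo.NewHTTPError(http.StatusNotFound)
}
log.Printf("loaded user %d (%s)", u.ID, u.Name)
```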
@@ -397,13 +354,13 @@ A `Handler` is a simple type that handles one or more of your routes and allows

 The provided patterns are not required, but were designed to make development as easy as possible.

-For this example, we'll create a new handler which includes a GET and POST route and uses the ORM. Start by creating a file at `pkg/handlers/example.go`.
+For this example, we'll create a new handler which includes a GET and POST route and uses the database. Start by creating a file at `pkg/handlers/example.go`.

 1) Define the handler type:

 ```go
 type Example struct {
-    orm *ent.Client
+    db *services.DBClient
     *services.TemplateRenderer
 }
 ```
@@ -416,12 +373,12 @@ func init() {
 }
 ```

-3) Initialize the handler (and inject any required dependencies from the _Container_)
+3) Initialize the handler (and inject any required dependencies from the _Container_). This will be called automatically.

 ```go
 func (e *Example) Init(c *services.Container) error {
     e.TemplateRenderer = c.TemplateRenderer
-    e.orm = c.ORM
+    e.db = c.DB
     return nil
 }
 ```
@@ -453,7 +410,7 @@ func (e *Example) PageSubmit(ctx echo.Context) error {

 Routes can return errors to indicate that something wrong happened. Ideally, the error is of type `*echo.HTTPError` to indicate the intended HTTP response code. You can use `return echo.NewHTTPError(http.StatusInternalServerError)`, for example. If an error of a different type is returned, an _Internal Server Error_ is assumed.

-The [error handler](https://echo.labstack.com/guide/error-handling/) is set to a provided route `pkg/handlers/error.go` in the `BuildRouter()` function. That means that if any middleware or route return an error, the request gets routed there. This route conveniently constructs and renders a `Page` which uses the template `templates/pages/error.go`. The status code is passed to the template so you can easily alter the markup depending on the error type.
+The [error handler](https://echo.labstack.com/guide/error-handling/) is set to the provided `Handler` in `pkg/handlers/error.go` in the `BuildRouter()` function. That means that if any middleware or route return an error, the request gets routed there. This route conveniently constructs and renders a `Page` which uses the template `templates/pages/error.gohtml`. The status code is passed to the template so you can easily alter the markup depending on the error type.

 ### Redirects

@@ -926,13 +883,11 @@ To include additional custom functions, add to the map in `NewFuncMap()` and def

 ## Cache

-As previously mentioned, [Redis](https://redis.io/) was chosen as the cache but it can be easily swapped out for something else. [go-redis](https://github.com/go-redis/redis) is used as the underlying client but the `Container` contains a custom client wrapper (`CacheClient`) that makes typical cache operations extremely simple. This wrapper does expose the [go-redis]() client however, at `CacheClient.Client`, in case you have a need for it.
+As previously mentioned, the default cache implementation is a simple in-memory store, backed by [otter](https://github.com/maypok86/otter), a lockless cache that uses [S3-FIFO](https://s3fifo.com/) eviction. The `Container` houses a `CacheClient` which is a useful wrapper to interact with the cache (see examples below). Within the `CacheClient` is the underlying store interface `CacheStore`. If you wish to use a different store, such as Redis, and want to keep using the `CacheClient`, simply implement the `CacheStore` interface with a Redis library and adjust the `Container` initialization to use that.

-The cache functionality within the `CacheClient` is powered by [gocache](https://github.com/eko/gocache) which was chosen because it makes interfacing with the cache service much easier, and it provides a consistent interface if you were to use a cache backend other than Redis.
+The built-in usage of the cache is currently only for optional [page caching](#cached-responses) and a simple example route located at `/cache` where you can set and view the value of a given cache entry.

-The built-in usage of the cache is currently only for optional [page caching](#cached-responses) but it can be used for practically anything. See examples below:
+Since the current cache is in-memory, there's no need to adjust the `Container` during tests. When this project used Redis, the configuration had a separate database that would be used strictly for tests to avoid writing to your primary database. If you need that functionality, it is easy to add back in.

-Similar to how there is a separate [test database](#separate-test-database) to avoid writing to your primary database when running tests, the cache supports a separate database as well for tests. Within the `config`, the test database number can be specified at `Config.Cache.TestDatabase`. By default, the primary database is `0` and the test database is `1`.
-
 ### Set data

@@ -943,6 +898,7 @@ err := c.Cache.
     Set().
     Key("my-key").
     Data(myData).
+    Expiration(time.Hour * 2).
     Save(ctx)
 ```

@@ -953,6 +909,7 @@ err := c.Cache.
     Set().
     Group("my-group").
     Key("my-key").
+    Expiration(time.Hour * 2).
     Data(myData).
     Save(ctx)
 ```
@@ -964,16 +921,6 @@ err := c.Cache.
     Set().
     Key("my-key").
     Tags("tag1", "tag2").
-    Data(myData).
-    Save(ctx)
-```
-
-**Include an expiration:**
-
-```go
-err := c.Cache.
-    Set().
-    Key("my-key").
     Expiration(time.Hour * 2).
     Data(myData).
     Save(ctx)
@@ -986,12 +933,9 @@ data, err := c.Cache.
     Get().
     Group("my-group").
     Key("my-key").
-    Type(myType).
     Fetch(ctx)
 ```

-The `Type` method tells the cache what type of data you stored so it can be cast afterwards with: `result, ok := data.(myType)`
-
 ### Flush data

 ```go
@@ -1013,29 +957,62 @@ err := c.Cache.
     Execute(ctx)
 ```

+### Tagging
+
+As shown in the previous examples, cache tags were provided because they can be convenient. However, maintaining them comes at a cost and it may not be a good fit for your application depending on your needs. When including tags, the `CacheClient` must lock in order to keep the tag index in sync, and the tag index cannot support eviction (that could result in a flush call not actually flushing the tag's keys), so the maps that provide the index do not have a size limit. See the code for more details.
+
 ## Tasks

-Tasks are operations to be executed in the background, either in a queue, at a specfic time, after a given amount of time, or according to a periodic interval (like _cron_). Some examples of tasks could be long-running operations, bulk processing, cleanup, notifications, and so on.
+Tasks are queued operations to be executed in the background, either immediately, at a specific time, or after a given amount of time has passed. Some examples of tasks could be long-running operations, bulk processing, cleanup, notifications, etc.

-Since we're already using [Redis](https://redis.io) as a _cache_, it's available to act as a message broker as well and handle the processing of queued tasks. [Asynq](https://github.com/hibiken/asynq) is the library chosen to interface with Redis and handle queueing tasks and processing them asynchronously with workers.
+Since we're already using [SQLite](https://sqlite.org/) for our database, it's available to act as a persistent store for queued tasks so that tasks are never lost, can be retried until successful, and their concurrent execution can be managed. [Goqite](https://github.com/maragudk/goqite) is the library chosen to interface with [SQLite](https://sqlite.org/) and handle queueing tasks and processing them asynchronously.

-To make things even easier, a custom client (`TaskClient`) is provided as a _Service_ on the `Container` which exposes a simple interface with [asynq](https://github.com/hibiken/asynq).
+To make things even easier, a custom client (`TaskClient`) is provided as a _Service_ on the `Container` which exposes a simple interface with [goqite](https://github.com/maragudk/goqite) that supports type-safe tasks and queues.

-For more detailed information about [asynq](https://github.com/hibiken/asynq) and it's usage, review the [wiki](https://github.com/hibiken/asynq/wiki).
-
 ### Queues

-All tasks must be placed in to queues in order to be executed by the [worker](#worker). You are not required to specify a queue when creating a task, as it will be placed in the default queue if one is not provided. [Asynq](https://github.com/hibiken/asynq) supports multiple queues which allows for functionality such as [prioritization](https://github.com/hibiken/asynq/wiki/Queue-Priority).
+A full example of a queue implementation can be found in `pkg/tasks` with an interactive form to create a task and add it to the queue at `/task` (see `pkg/handlers/task.go`).

-Creating a queued task is easy and at the minimum only requires the name of the task:
+A queue starts by declaring a `Task` _type_, which is the object that gets placed into a queue and eventually passed to a queue subscriber (a callback function to process the task). A `Task` must implement the `Name()` method which returns a unique name for the task. For example:

 ```go
-err := c.Tasks.
-    New("my_task").
-    Save()
+type MyTask struct {
+    Text string
+    Num  int
+}
+
+func (t MyTask) Name() string {
+    return "my_task"
+}
 ```

-This will add a task to the _default_ queue with a task _type_ of `my_task`. The type is used to route the task to the correct [worker](#worker).
+Then, create the queue for `MyTask` tasks:
+
+```go
+q := services.NewQueue[MyTask](func(ctx context.Context, task MyTask) error {
+    // This is where you process the task
+    fmt.Printf("Processed %s task!\n", task.Text)
+    return nil
+})
+```
+
+And finally, register the queue with the `TaskClient`:
+
+```go
+c.Tasks.Register(q)
+```
+
+See `pkg/tasks/register.go` for a simple way to register all of your queues and to easily pass the `Container` to them so the queue subscriber callbacks have access to all of your app's dependencies.
+
+Now you can easily add a task to the queue using the `TaskClient`:
+
+```go
+task := MyTask{Text: "Hello world!", Num: 10}
+
+err := c.Tasks.
+    New(task).
+    Save()
+```

 #### Options

@@ -1043,98 +1020,26 @@ Tasks can be created and queued with various chained options:

 ```go
 err := c.Tasks.
-    New("my_task").
-    Payload(taskData).
-    Queue("critical").
-    MaxRetries(5).
-    Timeout(30 * time.Second).
-    Wait(5 * time.Second).
-    Retain(2 * time.Hour).
+    New(task).
+    Wait(30 * time.Second). // Wait 30 seconds before passing the task to the subscriber
+    At(time.Date(...)).     // Wait until a given date before passing the task to the subscriber
+    Tx(tx).                 // Include the queueing of this task in a database transaction
     Save()
 ```

-In this example, this task will be:
-- Assigned a task type of `my_task`
-- The task worker will be sent `taskData` as the payload
-- Put in to the `critical` queue
-- Be retried up to 5 times in the event of a failure
-- Timeout after 30 seconds of execution
-- Wait 5 seconds before execution starts
-- Retain the task data in Redis for 2 hours after execution completes
-
-### Scheduled tasks
-
-Tasks can be scheduled to execute at a single point in the future or at a periodic interval. These tasks can also use the options highlighted in the previous section.
-
-**To execute a task once at a specific time:**
-
-```go
-err := c.Tasks.
-    New("my_task").
-    At(time.Date(2022, time.November, 10, 23, 0, 0, 0, time.UTC)).
-    Save()
-```
-
-**To execute a periodic task using a cron schedule:**
-
-```go
-err := c.Tasks.
-    New("my_task").
-    Periodic("*/10 * * * *")
-    Save()
-```
-
-**To execute a periodic task using a simple syntax:**
-
-```go
-err := c.Tasks.
-    New("my_task").
-    Periodic("@every 10m")
-    Save()
-```
-
-#### Scheduler
-
-A service needs to run in order to add periodic tasks to the queue at the specified intervals. When the application is started, this _scheduler_ service will also be started. In `cmd/web/main.go`, this is done with the following code:
-
-```go
-go func() {
-    if err := c.Tasks.StartScheduler(); err != nil {
-        log.Fatalf("scheduler shutdown: %v", err)
-    }
-}()
-```
-
-In the event of an application restart, periodic tasks must be re-registered with the _scheduler_ in order to continue being queued for execution.
-
-### Worker
-
-The worker is a service that executes the queued tasks using task processors. Included is a basic implementation of a separate worker service that will listen for and execute tasks being added to the queues. If you prefer to move the worker so it runs alongside the web server, you can do that, though it's recommended to keep these processes separate for performance and scalability reasons.
-
-The underlying functionality of the worker service is provided by [asynq](https://github.com/hibiken/asynq), so it's highly recommended that you review the documentation for that project first.
-
-#### Starting the worker
-
-A make target was added to allow you to start the worker service easily. From the root of the repository, execute `make worker`.
-
-#### Understanding the service
-
-The worker service is located in [cmd/worker/main.go](/cmd/worker/main.go) and starts with the creation of a new `*asynq.Server` provided by `asynq.NewServer()`. There are various configuration options available, so be sure to review them all.
-
-Prior to starting the service, we need to route tasks according to their _type_ to their handlers which will process the tasks. This is done by using `async.ServeMux` much like you would use an HTTP router:
-
-```go
-mux := asynq.NewServeMux()
-mux.Handle(tasks.TypeExample, new(tasks.ExampleProcessor))
-```
-
-In this example, all tasks of _type_ `tasks.TypeExample` will be routed to `ExampleProcessor` which is a struct that implements `ProcessTask()`. See the included [basic example](/pkg/tasks/example.go).
-
-Finally, the service is started with `async.Server.Run(mux)`.
-
-### Monitoring
-
-[Asynq](https://github.com/hibiken/asynq) comes with two options to monitor your queues: 1) [Command-line tool](https://github.com/hibiken/asynq#command-line-tool) and 2) [Web UI](https://github.com/hibiken/asynqmon)
+### Runner
+
+The _task runner_ is what manages periodically polling the database for available queued tasks to process and passing them to the queue's subscriber callback. The runner must be started for any of this to happen. In `cmd/web/main.go`, the _task runner_ is started by using the `TaskClient`:
+
+```go
+go c.Tasks.StartRunner(ctx)
+```
+
+The app [configuration](#configuration) contains values to configure the runner, including how often to poll the database for tasks, the maximum amount of retries for a given task, and the amount of tasks that can be processed concurrently.
+
+## Cron
+
+By default, no cron solution is provided because it's very easy to add yourself if you need this. You can either use a [ticker](https://pkg.go.dev/time#Ticker) or a [library](https://github.com/robfig/cron).

 ## Static files

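To make the ticker option from the Cron paragraph above concrete, here is a minimal sketch (not part of the diff) of a cron-like loop that queues the `MyTask` example on a fixed interval; the `c.Tasks.New(...).Save()` call mirrors the README examples, while the interval and placement are assumptions.

```go
// Hypothetical cron-style loop using the standard library ticker.
// Assumed to be started once (e.g. from cmd/web/main.go) alongside the task runner.
go func() {
    ticker := time.NewTicker(10 * time.Minute)
    defer ticker.Stop()

    for range ticker.C {
        // Queue a task each interval; the subscriber registered for MyTask will process it.
        if err := c.Tasks.New(MyTask{Text: "periodic run"}).Save(); err != nil {
            log.Printf("failed to queue periodic task: %v", err)
        }
    }
}()
```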
@@ -1152,7 +1057,7 @@ While it's ideal to use cache control headers on your static files so browsers c

 For example, to render a file located in `static/picture.png`, you would use:
 ```html
-<img src="{{File "picture.png"}}"/>
+<img src="{{file "picture.png"}}"/>
 ```

 Which would result in:
@@ -1266,22 +1171,19 @@ Future work includes but is not limited to:
 Thank you to all of the following amazing projects for making this possible.

 - [alpinejs](https://github.com/alpinejs/alpine)
-- [asynq](https://github.com/hibiken/asynq)
 - [bulma](https://github.com/jgthms/bulma)
-- [docker](https://www.docker.com/)
 - [echo](https://github.com/labstack/echo)
-- [ent](https://github.com/ent/ent)
+- [golang-migrate](https://github.com/golang-migrate/migrate)
 - [go](https://go.dev/)
-- [gocache](https://github.com/eko/gocache)
+- [go-sqlite3](https://github.com/mattn/go-sqlite3)
+- [goqite](https://github.com/maragudk/goqite)
 - [goquery](https://github.com/PuerkitoBio/goquery)
-- [go-redis](https://github.com/go-redis/redis)
 - [htmx](https://github.com/bigskysoftware/htmx)
 - [jwt](https://github.com/golang-jwt/jwt)
-- [pgx](https://github.com/jackc/pgx)
-- [postgresql](https://www.postgresql.org/)
-- [redis](https://redis.io/)
-- [sprig](https://github.com/Masterminds/sprig)
+- [otter](https://github.com/maypok86/otter)
 - [sessions](https://github.com/gorilla/sessions)
+- [sprig](https://github.com/Masterminds/sprig)
+- [sqlite](https://sqlite.org/)
 - [testify](https://github.com/stretchr/testify)
 - [validator](https://github.com/go-playground/validator)
 - [viper](https://github.com/spf13/viper)
cmd/web/main.go

@@ -3,6 +3,7 @@ package main
 import (
     "context"
     "crypto/tls"
+    "errors"
     "fmt"
     "log"
     "net/http"
@@ -10,8 +11,9 @@ import (
     "os/signal"
     "time"

-    "github.com/mikestefanello/pagoda/pkg/handlers"
-    "github.com/mikestefanello/pagoda/pkg/services"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/handlers"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/tasks"
 )

 func main() {
@@ -49,24 +51,25 @@ func main() {
         }
     }

-        if err := c.Web.StartServer(&srv); err != http.ErrServerClosed {
+        if err := c.Web.StartServer(&srv); errors.Is(err, http.ErrServerClosed) {
             log.Fatalf("shutting down the server: %v", err)
         }
     }()

-    // Start the scheduler service to queue periodic tasks
-    go func() {
-        if err := c.Tasks.StartScheduler(); err != nil {
-            log.Fatalf("scheduler shutdown: %v", err)
-        }
-    }()
+    // Register all task queues
+    tasks.Register(c)
+
+    // Start the task runner to execute queued tasks
+    ctx, cancel := context.WithCancel(context.Background())
+    go c.Tasks.StartRunner(ctx)

     // Wait for interrupt signal to gracefully shut down the server with a timeout of 10 seconds.
     quit := make(chan os.Signal, 1)
     signal.Notify(quit, os.Interrupt)
     signal.Notify(quit, os.Kill)
     <-quit
-    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    cancel()
+    ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
     defer cancel()
     if err := c.Web.Shutdown(ctx); err != nil {
         log.Fatal(err)
cmd/worker/main.go (deleted)

@@ -1,45 +0,0 @@
-package main
-
-import (
-    "fmt"
-    "log"
-
-    "github.com/hibiken/asynq"
-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/pkg/tasks"
-)
-
-func main() {
-    // Load the configuration
-    cfg, err := config.GetConfig()
-    if err != nil {
-        panic(fmt.Sprintf("failed to load config: %v", err))
-    }
-
-    // Build the worker server
-    srv := asynq.NewServer(
-        asynq.RedisClientOpt{
-            Addr:     fmt.Sprintf("%s:%d", cfg.Cache.Hostname, cfg.Cache.Port),
-            DB:       cfg.Cache.Database,
-            Password: cfg.Cache.Password,
-        },
-        asynq.Config{
-            // See asynq.Config for all available options and explanation
-            Concurrency: 10,
-            Queues: map[string]int{
-                "critical": 6,
-                "default":  3,
-                "low":      1,
-            },
-        },
-    )
-
-    // Map task types to the handlers
-    mux := asynq.NewServeMux()
-    mux.Handle(tasks.TypeExample, new(tasks.ExampleProcessor))
-
-    // Start the worker server
-    if err := srv.Run(mux); err != nil {
-        log.Fatalf("could not run worker server: %v", err)
-    }
-}
config/config.yaml

@@ -21,22 +21,19 @@ app:
   emailVerificationTokenExpiration: "12h"

 cache:
-  hostname: "localhost"
-  port: 6379
-  password: ""
-  database: 0
-  testDatabase: 1
+  capacity: 100000
   expiration:
     staticFile: "4380h"
     page: "24h"

-database:
-  hostname: "localhost"
-  port: 5432
-  user: "admin"
-  password: "admin"
-  database: "app"
-  testDatabase: "app_test"
+storage:
+  databaseFile: "data.db"
+  migrationsDir: db/migrations
+
+tasks:
+  pollInterval: "1s"
+  maxRetries: 10
+  goroutines: 1

 mail:
   hostname: "localhost"
config/config.go

@@ -2,6 +2,7 @@ package config

 import (
     "os"
+    "path/filepath"
     "strings"
     "time"

@@ -56,7 +57,8 @@
         HTTP     HTTPConfig
         App      AppConfig
         Cache    CacheConfig
-        Database DatabaseConfig
+        Storage  StorageConfig
+        Tasks    TasksConfig
         Mail     MailConfig
     }

@@ -89,25 +91,29 @@

     // CacheConfig stores the cache configuration
     CacheConfig struct {
-        Hostname     string
-        Port         uint16
-        Password     string
-        Database     int
-        TestDatabase int
+        Capacity   int
         Expiration struct {
             StaticFile time.Duration
             Page       time.Duration
         }
     }

-    // DatabaseConfig stores the database configuration
-    DatabaseConfig struct {
-        Hostname     string
-        Port         uint16
-        User         string
-        Password     string
-        Database     string
-        TestDatabase string
+    // StorageConfig stores the storage configuration
+    StorageConfig struct {
+        // DatabaseFile is the path used to find the database file.
+        // It can be an absolute path, or a path relative to the config file location.
+        DatabaseFile string
+
+        // MigrationsDir is the path used to find the migration files.
+        // It can be an absolute path, or a path relative to the config file location.
+        MigrationsDir string
+    }
+
+    // TasksConfig stores the tasks configuration
+    TasksConfig struct {
+        PollInterval time.Duration
+        MaxRetries   int
+        Goroutines   int
     }

     // MailConfig stores the mail configuration
@@ -129,8 +135,8 @@ func GetConfig() (Config, error) {
     viper.SetConfigType("yaml")
     viper.AddConfigPath(".")
     viper.AddConfigPath("config")
-    viper.AddConfigPath("../config")
-    viper.AddConfigPath("../../config")
+    viper.AddConfigPath("../")
+    viper.AddConfigPath("../../")

     // Load env variables
     viper.SetEnvPrefix("pagoda")
@@ -145,5 +151,14 @@
         return c, err
     }

+    usedConfigFilePath := viper.GetViper().ConfigFileUsed()
+    configFileDir := filepath.Dir(usedConfigFilePath)
+    if !filepath.IsAbs(c.Storage.DatabaseFile) {
+        c.Storage.DatabaseFile = filepath.Join(configFileDir, c.Storage.DatabaseFile)
+    }
+    if !filepath.IsAbs(c.Storage.MigrationsDir) {
+        c.Storage.MigrationsDir = filepath.Join(configFileDir, c.Storage.MigrationsDir)
+    }
+
     return c, nil
 }
db/migrations/001_init.up.sql (new file, 23 changes)

@@ -0,0 +1,23 @@
+CREATE TABLE password_tokens (
+    id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
+    hash text NOT NULL,
+    created_at text NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    password_token_user integer NOT NULL,
+
+    CONSTRAINT `password_tokens_users_user`
+        FOREIGN KEY (`password_token_user`)
+        REFERENCES `users` (`id`)
+        ON DELETE NO ACTION
+) STRICT;
+
+CREATE TABLE users (
+    id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
+    name text NOT NULL,
+    email text NOT NULL,
+    password text NOT NULL,
+    verified integer NOT NULL DEFAULT 0,
+    created_at text NOT NULL DEFAULT CURRENT_TIMESTAMP
+) STRICT;
+
+CREATE UNIQUE INDEX `users_email_key` ON `users` (`email`);
+
db/queries/password_tokens.sql (new file, 30 changes)

@@ -0,0 +1,30 @@
+-- name: GetAllPasswordTokensForUser :many
+-- GetAllPasswordTokensForUser retrieves all password tokens without checking expiration.
+SELECT * FROM password_tokens
+WHERE password_token_user = ?;
+
+-- name: GetValidPasswordToken :one
+-- GetValidPasswordToken returns only valid password tokens for the provided
+-- user, and only if the created_at time is greater than the provided time.
+SELECT * FROM password_tokens
+WHERE
+    id = ?
+    AND password_token_user = ?
+    AND datetime(created_at) > datetime(?)
+LIMIT 1;
+
+-- name: CreatePasswordToken :one
+INSERT INTO password_tokens (
+    hash, password_token_user
+) VALUES (
+    ?, ?
+) RETURNING *;
+
+-- name: UpdatePasswordTokenCreatedAt :exec
+UPDATE password_tokens
+SET created_at = ?
+WHERE id = ?;
+
+-- name: DeletePasswordTokens :exec
+DELETE FROM password_tokens
+WHERE password_token_user = ?;
db/queries/users.sql (new file, 27 changes)

@@ -0,0 +1,27 @@
+-- name: GetUserByEmail :one
+SELECT * FROM users
+WHERE email = lower(?)
+LIMIT 1;
+
+-- name: GetUserByID :one
+SELECT * FROM users
+WHERE id = ?
+LIMIT 1;
+
+-- name: CreateUser :one
+INSERT INTO users (
+    name, email, password
+) VALUES (
+    ?, ?, ?
+) RETURNING *;
+
+-- name: UpdateUserPassword :exec
+UPDATE users
+SET password = ?
+WHERE id = ?;
+
+-- name: UpdateUserSetVerified :exec
+UPDATE users
+SET verified = 1
+WHERE email = ?;
+
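For orientation, a minimal sketch (not part of the diff) of the Go method shapes sqlc would typically generate from the `-- name:` annotations above, expressed as an interface; the parameter and model types are assumptions based on the column types, and the real generated code lives in the sqlc output package (e.g. `pkg/models/sqlc` per the Makefile).

```go
// Hypothetical view of what sqlc generates for db/queries/users.sql:
// ":one" queries return a row plus an error, ":exec" queries return only an error,
// and multi-parameter inserts/updates get a generated Params struct.
type UserQueries interface {
    GetUserByEmail(ctx context.Context, email string) (User, error)
    GetUserByID(ctx context.Context, id int64) (User, error)
    CreateUser(ctx context.Context, arg CreateUserParams) (User, error)
    UpdateUserPassword(ctx context.Context, arg UpdateUserPasswordParams) error
    UpdateUserSetVerified(ctx context.Context, email string) error
}
```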
docker-compose.yml (deleted)

@@ -1,18 +0,0 @@
-version: "3"
-
-services:
-  cache:
-    image: "redis:alpine"
-    container_name: pagoda_cache
-    ports:
-      - "127.0.0.1:6379:6379"
-  db:
-    # PG 16 is currently not supported https://github.com/ent/ent/issues/3750
-    image: postgres:15-alpine
-    container_name: pagoda_db
-    ports:
-      - "127.0.0.1:5432:5432"
-    environment:
-      - POSTGRES_USER=admin
-      - POSTGRES_PASSWORD=admin
-      - POSTGRES_DB=app
ent/client.go (deleted, 517 changes)

@@ -1,517 +0,0 @@
-// Code generated by ent, DO NOT EDIT.
-
-package ent
-
-import (
-    "context"
-    "errors"
-    "fmt"
-    "log"
-    "reflect"
-
-    "github.com/mikestefanello/pagoda/ent/migrate"
-
-    "entgo.io/ent"
-    "entgo.io/ent/dialect"
-    "entgo.io/ent/dialect/sql"
-    "entgo.io/ent/dialect/sql/sqlgraph"
-    "github.com/mikestefanello/pagoda/ent/passwordtoken"
-    "github.com/mikestefanello/pagoda/ent/user"
-)
-
-// Client is the client that holds all ent builders.
-type Client struct {
-    config
-    // Schema is the client for creating, migrating and dropping schema.
-    Schema *migrate.Schema
-    // PasswordToken is the client for interacting with the PasswordToken builders.
-    PasswordToken *PasswordTokenClient
-    // User is the client for interacting with the User builders.
-    User *UserClient
-}
-
-// NewClient creates a new client configured with the given options.
-func NewClient(opts ...Option) *Client {
-    client := &Client{config: newConfig(opts...)}
-    client.init()
-    return client
-}
-
-func (c *Client) init() {
-    c.Schema = migrate.NewSchema(c.driver)
-    c.PasswordToken = NewPasswordTokenClient(c.config)
-    c.User = NewUserClient(c.config)
-}
-
-type (
-    // config is the configuration for the client and its builder.
-    config struct {
-        // driver used for executing database requests.
-        driver dialect.Driver
-        // debug enable a debug logging.
-        debug bool
-        // log used for logging on debug mode.
-        log func(...any)
-        // hooks to execute on mutations.
-        hooks *hooks
-        // interceptors to execute on queries.
-        inters *inters
-    }
-    // Option function to configure the client.
-    Option func(*config)
-)
-
-// newConfig creates a new config for the client.
-func newConfig(opts ...Option) config {
-    cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
-    cfg.options(opts...)
-    return cfg
-}
-
-// options applies the options on the config object.
-func (c *config) options(opts ...Option) {
-    for _, opt := range opts {
-        opt(c)
-    }
-    if c.debug {
-        c.driver = dialect.Debug(c.driver, c.log)
-    }
-}
-
-// Debug enables debug logging on the ent.Driver.
-func Debug() Option {
-    return func(c *config) {
-        c.debug = true
-    }
-}
-
-// Log sets the logging function for debug mode.
-func Log(fn func(...any)) Option {
-    return func(c *config) {
-        c.log = fn
-    }
-}
-
-// Driver configures the client driver.
-func Driver(driver dialect.Driver) Option {
-    return func(c *config) {
-        c.driver = driver
-    }
-}
-
-// Open opens a database/sql.DB specified by the driver name and
-// the data source name, and returns a new client attached to it.
-// Optional parameters can be added for configuring the client.
-func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
-    switch driverName {
-    case dialect.MySQL, dialect.Postgres, dialect.SQLite:
-        drv, err := sql.Open(driverName, dataSourceName)
-        if err != nil {
-            return nil, err
-        }
-        return NewClient(append(options, Driver(drv))...), nil
-    default:
-        return nil, fmt.Errorf("unsupported driver: %q", driverName)
-    }
-}
-
-// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
-var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
-
-// Tx returns a new transactional client. The provided context
-// is used until the transaction is committed or rolled back.
-func (c *Client) Tx(ctx context.Context) (*Tx, error) {
-    if _, ok := c.driver.(*txDriver); ok {
-        return nil, ErrTxStarted
-    }
-    tx, err := newTx(ctx, c.driver)
-    if err != nil {
-        return nil, fmt.Errorf("ent: starting a transaction: %w", err)
-    }
-    cfg := c.config
-    cfg.driver = tx
-    return &Tx{
-        ctx:           ctx,
-        config:        cfg,
-        PasswordToken: NewPasswordTokenClient(cfg),
-        User:          NewUserClient(cfg),
-    }, nil
-}
-
-// BeginTx returns a transactional client with specified options.
-func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
-    if _, ok := c.driver.(*txDriver); ok {
-        return nil, errors.New("ent: cannot start a transaction within a transaction")
-    }
-    tx, err := c.driver.(interface {
-        BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
-    }).BeginTx(ctx, opts)
-    if err != nil {
-        return nil, fmt.Errorf("ent: starting a transaction: %w", err)
-    }
-    cfg := c.config
-    cfg.driver = &txDriver{tx: tx, drv: c.driver}
-    return &Tx{
-        ctx:           ctx,
-        config:        cfg,
-        PasswordToken: NewPasswordTokenClient(cfg),
-        User:          NewUserClient(cfg),
-    }, nil
-}
-
-// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
-//
-//    client.Debug().
-//        PasswordToken.
-//        Query().
-//        Count(ctx)
-func (c *Client) Debug() *Client {
-    if c.debug {
-        return c
-    }
-    cfg := c.config
-    cfg.driver = dialect.Debug(c.driver, c.log)
-    client := &Client{config: cfg}
-    client.init()
-    return client
-}
-
-// Close closes the database connection and prevents new queries from starting.
-func (c *Client) Close() error {
-    return c.driver.Close()
-}
-
-// Use adds the mutation hooks to all the entity clients.
-// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
-func (c *Client) Use(hooks ...Hook) {
-    c.PasswordToken.Use(hooks...)
-    c.User.Use(hooks...)
-}
-
-// Intercept adds the query interceptors to all the entity clients.
-// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
-func (c *Client) Intercept(interceptors ...Interceptor) {
-    c.PasswordToken.Intercept(interceptors...)
-    c.User.Intercept(interceptors...)
-}
-
-// Mutate implements the ent.Mutator interface.
-func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
-    switch m := m.(type) {
-    case *PasswordTokenMutation:
-        return c.PasswordToken.mutate(ctx, m)
-    case *UserMutation:
-        return c.User.mutate(ctx, m)
-    default:
-        return nil, fmt.Errorf("ent: unknown mutation type %T", m)
-    }
-}
-
-// PasswordTokenClient is a client for the PasswordToken schema.
-type PasswordTokenClient struct {
-    config
-}
-
-// NewPasswordTokenClient returns a client for the PasswordToken from the given config.
-func NewPasswordTokenClient(c config) *PasswordTokenClient {
-    return &PasswordTokenClient{config: c}
-}
-
-// Use adds a list of mutation hooks to the hooks stack.
|
|
||||||
// A call to `Use(f, g, h)` equals to `passwordtoken.Hooks(f(g(h())))`.
|
|
||||||
func (c *PasswordTokenClient) Use(hooks ...Hook) {
|
|
||||||
c.hooks.PasswordToken = append(c.hooks.PasswordToken, hooks...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
|
||||||
// A call to `Intercept(f, g, h)` equals to `passwordtoken.Intercept(f(g(h())))`.
|
|
||||||
func (c *PasswordTokenClient) Intercept(interceptors ...Interceptor) {
|
|
||||||
c.inters.PasswordToken = append(c.inters.PasswordToken, interceptors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create returns a builder for creating a PasswordToken entity.
|
|
||||||
func (c *PasswordTokenClient) Create() *PasswordTokenCreate {
|
|
||||||
mutation := newPasswordTokenMutation(c.config, OpCreate)
|
|
||||||
return &PasswordTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateBulk returns a builder for creating a bulk of PasswordToken entities.
|
|
||||||
func (c *PasswordTokenClient) CreateBulk(builders ...*PasswordTokenCreate) *PasswordTokenCreateBulk {
|
|
||||||
return &PasswordTokenCreateBulk{config: c.config, builders: builders}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
|
||||||
// a builder and applies setFunc on it.
|
|
||||||
func (c *PasswordTokenClient) MapCreateBulk(slice any, setFunc func(*PasswordTokenCreate, int)) *PasswordTokenCreateBulk {
|
|
||||||
rv := reflect.ValueOf(slice)
|
|
||||||
if rv.Kind() != reflect.Slice {
|
|
||||||
return &PasswordTokenCreateBulk{err: fmt.Errorf("calling to PasswordTokenClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
|
||||||
}
|
|
||||||
builders := make([]*PasswordTokenCreate, rv.Len())
|
|
||||||
for i := 0; i < rv.Len(); i++ {
|
|
||||||
builders[i] = c.Create()
|
|
||||||
setFunc(builders[i], i)
|
|
||||||
}
|
|
||||||
return &PasswordTokenCreateBulk{config: c.config, builders: builders}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update returns an update builder for PasswordToken.
|
|
||||||
func (c *PasswordTokenClient) Update() *PasswordTokenUpdate {
|
|
||||||
mutation := newPasswordTokenMutation(c.config, OpUpdate)
|
|
||||||
return &PasswordTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOne returns an update builder for the given entity.
|
|
||||||
func (c *PasswordTokenClient) UpdateOne(pt *PasswordToken) *PasswordTokenUpdateOne {
|
|
||||||
mutation := newPasswordTokenMutation(c.config, OpUpdateOne, withPasswordToken(pt))
|
|
||||||
return &PasswordTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOneID returns an update builder for the given id.
|
|
||||||
func (c *PasswordTokenClient) UpdateOneID(id int) *PasswordTokenUpdateOne {
|
|
||||||
mutation := newPasswordTokenMutation(c.config, OpUpdateOne, withPasswordTokenID(id))
|
|
||||||
return &PasswordTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete returns a delete builder for PasswordToken.
|
|
||||||
func (c *PasswordTokenClient) Delete() *PasswordTokenDelete {
|
|
||||||
mutation := newPasswordTokenMutation(c.config, OpDelete)
|
|
||||||
return &PasswordTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteOne returns a builder for deleting the given entity.
|
|
||||||
func (c *PasswordTokenClient) DeleteOne(pt *PasswordToken) *PasswordTokenDeleteOne {
|
|
||||||
return c.DeleteOneID(pt.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
|
||||||
func (c *PasswordTokenClient) DeleteOneID(id int) *PasswordTokenDeleteOne {
|
|
||||||
builder := c.Delete().Where(passwordtoken.ID(id))
|
|
||||||
builder.mutation.id = &id
|
|
||||||
builder.mutation.op = OpDeleteOne
|
|
||||||
return &PasswordTokenDeleteOne{builder}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query returns a query builder for PasswordToken.
|
|
||||||
func (c *PasswordTokenClient) Query() *PasswordTokenQuery {
|
|
||||||
return &PasswordTokenQuery{
|
|
||||||
config: c.config,
|
|
||||||
ctx: &QueryContext{Type: TypePasswordToken},
|
|
||||||
inters: c.Interceptors(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a PasswordToken entity by its id.
|
|
||||||
func (c *PasswordTokenClient) Get(ctx context.Context, id int) (*PasswordToken, error) {
|
|
||||||
return c.Query().Where(passwordtoken.ID(id)).Only(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetX is like Get, but panics if an error occurs.
|
|
||||||
func (c *PasswordTokenClient) GetX(ctx context.Context, id int) *PasswordToken {
|
|
||||||
obj, err := c.Get(ctx, id)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryUser queries the user edge of a PasswordToken.
|
|
||||||
func (c *PasswordTokenClient) QueryUser(pt *PasswordToken) *UserQuery {
|
|
||||||
query := (&UserClient{config: c.config}).Query()
|
|
||||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
|
||||||
id := pt.ID
|
|
||||||
step := sqlgraph.NewStep(
|
|
||||||
sqlgraph.From(passwordtoken.Table, passwordtoken.FieldID, id),
|
|
||||||
sqlgraph.To(user.Table, user.FieldID),
|
|
||||||
sqlgraph.Edge(sqlgraph.M2O, false, passwordtoken.UserTable, passwordtoken.UserColumn),
|
|
||||||
)
|
|
||||||
fromV = sqlgraph.Neighbors(pt.driver.Dialect(), step)
|
|
||||||
return fromV, nil
|
|
||||||
}
|
|
||||||
return query
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hooks returns the client hooks.
|
|
||||||
func (c *PasswordTokenClient) Hooks() []Hook {
|
|
||||||
return c.hooks.PasswordToken
|
|
||||||
}
|
|
||||||
|
|
||||||
// Interceptors returns the client interceptors.
|
|
||||||
func (c *PasswordTokenClient) Interceptors() []Interceptor {
|
|
||||||
return c.inters.PasswordToken
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *PasswordTokenClient) mutate(ctx context.Context, m *PasswordTokenMutation) (Value, error) {
|
|
||||||
switch m.Op() {
|
|
||||||
case OpCreate:
|
|
||||||
return (&PasswordTokenCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpUpdate:
|
|
||||||
return (&PasswordTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpUpdateOne:
|
|
||||||
return (&PasswordTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpDelete, OpDeleteOne:
|
|
||||||
return (&PasswordTokenDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("ent: unknown PasswordToken mutation op: %q", m.Op())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UserClient is a client for the User schema.
|
|
||||||
type UserClient struct {
|
|
||||||
config
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUserClient returns a client for the User from the given config.
|
|
||||||
func NewUserClient(c config) *UserClient {
|
|
||||||
return &UserClient{config: c}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use adds a list of mutation hooks to the hooks stack.
|
|
||||||
// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`.
|
|
||||||
func (c *UserClient) Use(hooks ...Hook) {
|
|
||||||
c.hooks.User = append(c.hooks.User, hooks...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
|
||||||
// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`.
|
|
||||||
func (c *UserClient) Intercept(interceptors ...Interceptor) {
|
|
||||||
c.inters.User = append(c.inters.User, interceptors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create returns a builder for creating a User entity.
|
|
||||||
func (c *UserClient) Create() *UserCreate {
|
|
||||||
mutation := newUserMutation(c.config, OpCreate)
|
|
||||||
return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateBulk returns a builder for creating a bulk of User entities.
|
|
||||||
func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk {
|
|
||||||
return &UserCreateBulk{config: c.config, builders: builders}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
|
||||||
// a builder and applies setFunc on it.
|
|
||||||
func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk {
|
|
||||||
rv := reflect.ValueOf(slice)
|
|
||||||
if rv.Kind() != reflect.Slice {
|
|
||||||
return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
|
||||||
}
|
|
||||||
builders := make([]*UserCreate, rv.Len())
|
|
||||||
for i := 0; i < rv.Len(); i++ {
|
|
||||||
builders[i] = c.Create()
|
|
||||||
setFunc(builders[i], i)
|
|
||||||
}
|
|
||||||
return &UserCreateBulk{config: c.config, builders: builders}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update returns an update builder for User.
|
|
||||||
func (c *UserClient) Update() *UserUpdate {
|
|
||||||
mutation := newUserMutation(c.config, OpUpdate)
|
|
||||||
return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOne returns an update builder for the given entity.
|
|
||||||
func (c *UserClient) UpdateOne(u *User) *UserUpdateOne {
|
|
||||||
mutation := newUserMutation(c.config, OpUpdateOne, withUser(u))
|
|
||||||
return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOneID returns an update builder for the given id.
|
|
||||||
func (c *UserClient) UpdateOneID(id int) *UserUpdateOne {
|
|
||||||
mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id))
|
|
||||||
return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete returns a delete builder for User.
|
|
||||||
func (c *UserClient) Delete() *UserDelete {
|
|
||||||
mutation := newUserMutation(c.config, OpDelete)
|
|
||||||
return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteOne returns a builder for deleting the given entity.
|
|
||||||
func (c *UserClient) DeleteOne(u *User) *UserDeleteOne {
|
|
||||||
return c.DeleteOneID(u.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
|
||||||
func (c *UserClient) DeleteOneID(id int) *UserDeleteOne {
|
|
||||||
builder := c.Delete().Where(user.ID(id))
|
|
||||||
builder.mutation.id = &id
|
|
||||||
builder.mutation.op = OpDeleteOne
|
|
||||||
return &UserDeleteOne{builder}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query returns a query builder for User.
|
|
||||||
func (c *UserClient) Query() *UserQuery {
|
|
||||||
return &UserQuery{
|
|
||||||
config: c.config,
|
|
||||||
ctx: &QueryContext{Type: TypeUser},
|
|
||||||
inters: c.Interceptors(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a User entity by its id.
|
|
||||||
func (c *UserClient) Get(ctx context.Context, id int) (*User, error) {
|
|
||||||
return c.Query().Where(user.ID(id)).Only(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetX is like Get, but panics if an error occurs.
|
|
||||||
func (c *UserClient) GetX(ctx context.Context, id int) *User {
|
|
||||||
obj, err := c.Get(ctx, id)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryOwner queries the owner edge of a User.
|
|
||||||
func (c *UserClient) QueryOwner(u *User) *PasswordTokenQuery {
|
|
||||||
query := (&PasswordTokenClient{config: c.config}).Query()
|
|
||||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
|
||||||
id := u.ID
|
|
||||||
step := sqlgraph.NewStep(
|
|
||||||
sqlgraph.From(user.Table, user.FieldID, id),
|
|
||||||
sqlgraph.To(passwordtoken.Table, passwordtoken.FieldID),
|
|
||||||
sqlgraph.Edge(sqlgraph.O2M, true, user.OwnerTable, user.OwnerColumn),
|
|
||||||
)
|
|
||||||
fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
|
|
||||||
return fromV, nil
|
|
||||||
}
|
|
||||||
return query
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hooks returns the client hooks.
|
|
||||||
func (c *UserClient) Hooks() []Hook {
|
|
||||||
hooks := c.hooks.User
|
|
||||||
return append(hooks[:len(hooks):len(hooks)], user.Hooks[:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Interceptors returns the client interceptors.
|
|
||||||
func (c *UserClient) Interceptors() []Interceptor {
|
|
||||||
return c.inters.User
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) {
|
|
||||||
switch m.Op() {
|
|
||||||
case OpCreate:
|
|
||||||
return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpUpdate:
|
|
||||||
return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpUpdateOne:
|
|
||||||
return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
|
||||||
case OpDelete, OpDeleteOne:
|
|
||||||
return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// hooks and interceptors per client, for fast access.
|
|
||||||
type (
|
|
||||||
hooks struct {
|
|
||||||
PasswordToken, User []ent.Hook
|
|
||||||
}
|
|
||||||
inters struct {
|
|
||||||
PasswordToken, User []ent.Interceptor
|
|
||||||
}
|
|
||||||
)
|
|
610
ent/ent.go
610
ent/ent.go
@ -1,610 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package ent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"entgo.io/ent"
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
|
||||||
"github.com/mikestefanello/pagoda/ent/passwordtoken"
|
|
||||||
"github.com/mikestefanello/pagoda/ent/user"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ent aliases to avoid import conflicts in user's code.
|
|
||||||
type (
|
|
||||||
Op = ent.Op
|
|
||||||
Hook = ent.Hook
|
|
||||||
Value = ent.Value
|
|
||||||
Query = ent.Query
|
|
||||||
QueryContext = ent.QueryContext
|
|
||||||
Querier = ent.Querier
|
|
||||||
QuerierFunc = ent.QuerierFunc
|
|
||||||
Interceptor = ent.Interceptor
|
|
||||||
InterceptFunc = ent.InterceptFunc
|
|
||||||
Traverser = ent.Traverser
|
|
||||||
TraverseFunc = ent.TraverseFunc
|
|
||||||
Policy = ent.Policy
|
|
||||||
Mutator = ent.Mutator
|
|
||||||
Mutation = ent.Mutation
|
|
||||||
MutateFunc = ent.MutateFunc
|
|
||||||
)
|
|
||||||
|
|
||||||
type clientCtxKey struct{}
|
|
||||||
|
|
||||||
// FromContext returns a Client stored inside a context, or nil if there isn't one.
|
|
||||||
func FromContext(ctx context.Context) *Client {
|
|
||||||
c, _ := ctx.Value(clientCtxKey{}).(*Client)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContext returns a new context with the given Client attached.
|
|
||||||
func NewContext(parent context.Context, c *Client) context.Context {
|
|
||||||
return context.WithValue(parent, clientCtxKey{}, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
type txCtxKey struct{}
|
|
||||||
|
|
||||||
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
|
|
||||||
func TxFromContext(ctx context.Context) *Tx {
|
|
||||||
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
|
|
||||||
return tx
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTxContext returns a new context with the given Tx attached.
|
|
||||||
func NewTxContext(parent context.Context, tx *Tx) context.Context {
|
|
||||||
return context.WithValue(parent, txCtxKey{}, tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrderFunc applies an ordering on the sql selector.
|
|
||||||
// Deprecated: Use Asc/Desc functions or the package builders instead.
|
|
||||||
type OrderFunc func(*sql.Selector)
|
|
||||||
|
|
||||||
var (
|
|
||||||
initCheck sync.Once
|
|
||||||
columnCheck sql.ColumnCheck
|
|
||||||
)
|
|
||||||
|
|
||||||
// columnChecker checks if the column exists in the given table.
|
|
||||||
func checkColumn(table, column string) error {
|
|
||||||
initCheck.Do(func() {
|
|
||||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
|
||||||
passwordtoken.Table: passwordtoken.ValidColumn,
|
|
||||||
user.Table: user.ValidColumn,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
return columnCheck(table, column)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Asc applies the given fields in ASC order.
|
|
||||||
func Asc(fields ...string) func(*sql.Selector) {
|
|
||||||
return func(s *sql.Selector) {
|
|
||||||
for _, f := range fields {
|
|
||||||
if err := checkColumn(s.TableName(), f); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
}
|
|
||||||
s.OrderBy(sql.Asc(s.C(f)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Desc applies the given fields in DESC order.
|
|
||||||
func Desc(fields ...string) func(*sql.Selector) {
|
|
||||||
return func(s *sql.Selector) {
|
|
||||||
for _, f := range fields {
|
|
||||||
if err := checkColumn(s.TableName(), f); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
}
|
|
||||||
s.OrderBy(sql.Desc(s.C(f)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
|
|
||||||
type AggregateFunc func(*sql.Selector) string
|
|
||||||
|
|
||||||
// As is a pseudo aggregation function for renaming another other functions with custom names. For example:
|
|
||||||
//
|
|
||||||
// GroupBy(field1, field2).
|
|
||||||
// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
|
|
||||||
// Scan(ctx, &v)
|
|
||||||
func As(fn AggregateFunc, end string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
return sql.As(fn(s), end)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count applies the "count" aggregation function on each group.
|
|
||||||
func Count() AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
return sql.Count("*")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max applies the "max" aggregation function on the given field of each group.
|
|
||||||
func Max(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Max(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mean applies the "mean" aggregation function on the given field of each group.
|
|
||||||
func Mean(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Avg(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Min applies the "min" aggregation function on the given field of each group.
|
|
||||||
func Min(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Min(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum applies the "sum" aggregation function on the given field of each group.
|
|
||||||
func Sum(field string) AggregateFunc {
|
|
||||||
return func(s *sql.Selector) string {
|
|
||||||
if err := checkColumn(s.TableName(), field); err != nil {
|
|
||||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sql.Sum(s.C(field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidationError returns when validating a field or edge fails.
|
|
||||||
type ValidationError struct {
|
|
||||||
Name string // Field or edge name.
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (e *ValidationError) Error() string {
|
|
||||||
return e.err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap implements the errors.Wrapper interface.
|
|
||||||
func (e *ValidationError) Unwrap() error {
|
|
||||||
return e.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValidationError returns a boolean indicating whether the error is a validation error.
|
|
||||||
func IsValidationError(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var e *ValidationError
|
|
||||||
return errors.As(err, &e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
|
|
||||||
type NotFoundError struct {
|
|
||||||
label string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (e *NotFoundError) Error() string {
|
|
||||||
return "ent: " + e.label + " not found"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNotFound returns a boolean indicating whether the error is a not found error.
|
|
||||||
func IsNotFound(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var e *NotFoundError
|
|
||||||
return errors.As(err, &e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaskNotFound masks not found error.
|
|
||||||
func MaskNotFound(err error) error {
|
|
||||||
if IsNotFound(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database.
|
|
||||||
type NotSingularError struct {
|
|
||||||
label string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (e *NotSingularError) Error() string {
|
|
||||||
return "ent: " + e.label + " not singular"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNotSingular returns a boolean indicating whether the error is a not singular error.
|
|
||||||
func IsNotSingular(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var e *NotSingularError
|
|
||||||
return errors.As(err, &e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotLoadedError returns when trying to get a node that was not loaded by the query.
|
|
||||||
type NotLoadedError struct {
|
|
||||||
edge string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (e *NotLoadedError) Error() string {
|
|
||||||
return "ent: " + e.edge + " edge was not loaded"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
|
|
||||||
func IsNotLoaded(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var e *NotLoadedError
|
|
||||||
return errors.As(err, &e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConstraintError returns when trying to create/update one or more entities and
|
|
||||||
// one or more of their constraints failed. For example, violation of edge or
|
|
||||||
// field uniqueness.
|
|
||||||
type ConstraintError struct {
|
|
||||||
msg string
|
|
||||||
wrap error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (e ConstraintError) Error() string {
|
|
||||||
return "ent: constraint failed: " + e.msg
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap implements the errors.Wrapper interface.
|
|
||||||
func (e *ConstraintError) Unwrap() error {
|
|
||||||
return e.wrap
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
|
|
||||||
func IsConstraintError(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var e *ConstraintError
|
|
||||||
return errors.As(err, &e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// selector embedded by the different Select/GroupBy builders.
|
|
||||||
type selector struct {
|
|
||||||
label string
|
|
||||||
flds *[]string
|
|
||||||
fns []AggregateFunc
|
|
||||||
scan func(context.Context, any) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScanX is like Scan, but panics if an error occurs.
|
|
||||||
func (s *selector) ScanX(ctx context.Context, v any) {
|
|
||||||
if err := s.scan(ctx, v); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Strings returns list of strings from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Strings(ctx context.Context) ([]string, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []string
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringsX is like Strings, but panics if an error occurs.
|
|
||||||
func (s *selector) StringsX(ctx context.Context) []string {
|
|
||||||
v, err := s.Strings(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a single string from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) String(ctx context.Context) (_ string, err error) {
|
|
||||||
var v []string
|
|
||||||
if v, err = s.Strings(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringX is like String, but panics if an error occurs.
|
|
||||||
func (s *selector) StringX(ctx context.Context) string {
|
|
||||||
v, err := s.String(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ints returns list of ints from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Ints(ctx context.Context) ([]int, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []int
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IntsX is like Ints, but panics if an error occurs.
|
|
||||||
func (s *selector) IntsX(ctx context.Context) []int {
|
|
||||||
v, err := s.Ints(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int returns a single int from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Int(ctx context.Context) (_ int, err error) {
|
|
||||||
var v []int
|
|
||||||
if v, err = s.Ints(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// IntX is like Int, but panics if an error occurs.
|
|
||||||
func (s *selector) IntX(ctx context.Context) int {
|
|
||||||
v, err := s.Int(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []float64
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64sX is like Float64s, but panics if an error occurs.
|
|
||||||
func (s *selector) Float64sX(ctx context.Context) []float64 {
|
|
||||||
v, err := s.Float64s(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
|
|
||||||
var v []float64
|
|
||||||
if v, err = s.Float64s(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64X is like Float64, but panics if an error occurs.
|
|
||||||
func (s *selector) Float64X(ctx context.Context) float64 {
|
|
||||||
v, err := s.Float64(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bools returns list of bools from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
|
|
||||||
if len(*s.flds) > 1 {
|
|
||||||
return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
|
|
||||||
}
|
|
||||||
var v []bool
|
|
||||||
if err := s.scan(ctx, &v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolsX is like Bools, but panics if an error occurs.
|
|
||||||
func (s *selector) BoolsX(ctx context.Context) []bool {
|
|
||||||
v, err := s.Bools(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bool returns a single bool from a selector. It is only allowed when selecting one field.
|
|
||||||
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
|
|
||||||
var v []bool
|
|
||||||
if v, err = s.Bools(ctx); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch len(v) {
|
|
||||||
case 1:
|
|
||||||
return v[0], nil
|
|
||||||
case 0:
|
|
||||||
err = &NotFoundError{s.label}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolX is like Bool, but panics if an error occurs.
|
|
||||||
func (s *selector) BoolX(ctx context.Context) bool {
|
|
||||||
v, err := s.Bool(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// withHooks invokes the builder operation with the given hooks, if any.
|
|
||||||
func withHooks[V Value, M any, PM interface {
|
|
||||||
*M
|
|
||||||
Mutation
|
|
||||||
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
|
|
||||||
if len(hooks) == 0 {
|
|
||||||
return exec(ctx)
|
|
||||||
}
|
|
||||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
|
||||||
mutationT, ok := any(m).(PM)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
|
||||||
}
|
|
||||||
// Set the mutation to the builder.
|
|
||||||
*mutation = *mutationT
|
|
||||||
return exec(ctx)
|
|
||||||
})
|
|
||||||
for i := len(hooks) - 1; i >= 0; i-- {
|
|
||||||
if hooks[i] == nil {
|
|
||||||
return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
|
||||||
}
|
|
||||||
mut = hooks[i](mut)
|
|
||||||
}
|
|
||||||
v, err := mut.Mutate(ctx, mutation)
|
|
||||||
if err != nil {
|
|
||||||
return value, err
|
|
||||||
}
|
|
||||||
nv, ok := v.(V)
|
|
||||||
if !ok {
|
|
||||||
return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
|
|
||||||
}
|
|
||||||
return nv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
|
|
||||||
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
|
|
||||||
if ent.QueryFromContext(ctx) == nil {
|
|
||||||
qc.Op = op
|
|
||||||
ctx = ent.NewQueryContext(ctx, qc)
|
|
||||||
}
|
|
||||||
return ctx
|
|
||||||
}
|
|
||||||
|
|
||||||
func querierAll[V Value, Q interface {
|
|
||||||
sqlAll(context.Context, ...queryHook) (V, error)
|
|
||||||
}]() Querier {
|
|
||||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
|
||||||
query, ok := q.(Q)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
|
||||||
}
|
|
||||||
return query.sqlAll(ctx)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func querierCount[Q interface {
|
|
||||||
sqlCount(context.Context) (int, error)
|
|
||||||
}]() Querier {
|
|
||||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
|
||||||
query, ok := q.(Q)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
|
||||||
}
|
|
||||||
return query.sqlCount(ctx)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
|
|
||||||
for i := len(inters) - 1; i >= 0; i-- {
|
|
||||||
qr = inters[i].Intercept(qr)
|
|
||||||
}
|
|
||||||
rv, err := qr.Query(ctx, q)
|
|
||||||
if err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
vt, ok := rv.(V)
|
|
||||||
if !ok {
|
|
||||||
return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
|
|
||||||
}
|
|
||||||
return vt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
|
|
||||||
sqlScan(context.Context, Q1, any) error
|
|
||||||
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
|
||||||
query, ok := q.(Q1)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
|
||||||
}
|
|
||||||
if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
|
|
||||||
return rv.Elem().Interface(), nil
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
})
|
|
||||||
for i := len(inters) - 1; i >= 0; i-- {
|
|
||||||
qr = inters[i].Intercept(qr)
|
|
||||||
}
|
|
||||||
vv, err := qr.Query(ctx, rootQuery)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch rv2 := reflect.ValueOf(vv); {
|
|
||||||
case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
|
|
||||||
case rv.Type() == rv2.Type():
|
|
||||||
rv.Elem().Set(rv2.Elem())
|
|
||||||
case rv.Elem().Type() == rv2.Type():
|
|
||||||
rv.Elem().Set(rv2)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// queryHook describes an internal hook for the different sqlAll methods.
|
|
||||||
type queryHook func(context.Context, *sqlgraph.QuerySpec)
|
|
@ -1,84 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package enttest
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/mikestefanello/pagoda/ent"
|
|
||||||
// required by schema hooks.
|
|
||||||
_ "github.com/mikestefanello/pagoda/ent/runtime"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect/sql/schema"
|
|
||||||
"github.com/mikestefanello/pagoda/ent/migrate"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// TestingT is the interface that is shared between
|
|
||||||
// testing.T and testing.B and used by enttest.
|
|
||||||
TestingT interface {
|
|
||||||
FailNow()
|
|
||||||
Error(...any)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Option configures client creation.
|
|
||||||
Option func(*options)
|
|
||||||
|
|
||||||
options struct {
|
|
||||||
opts []ent.Option
|
|
||||||
migrateOpts []schema.MigrateOption
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// WithOptions forwards options to client creation.
|
|
||||||
func WithOptions(opts ...ent.Option) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.opts = append(o.opts, opts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMigrateOptions forwards options to auto migration.
|
|
||||||
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.migrateOpts = append(o.migrateOpts, opts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newOptions(opts []Option) *options {
|
|
||||||
o := &options{}
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(o)
|
|
||||||
}
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open calls ent.Open and auto-run migration.
|
|
||||||
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
|
|
||||||
o := newOptions(opts)
|
|
||||||
c, err := ent.Open(driverName, dataSourceName, o.opts...)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
t.FailNow()
|
|
||||||
}
|
|
||||||
migrateSchema(t, c, o)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient calls ent.NewClient and auto-run migration.
|
|
||||||
func NewClient(t TestingT, opts ...Option) *ent.Client {
|
|
||||||
o := newOptions(opts)
|
|
||||||
c := ent.NewClient(o.opts...)
|
|
||||||
migrateSchema(t, c, o)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
func migrateSchema(t TestingT, c *ent.Client, o *options) {
|
|
||||||
tables, err := schema.CopyTables(migrate.Tables)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
t.FailNow()
|
|
||||||
}
|
|
||||||
if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
t.FailNow()
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,3 +0,0 @@
|
|||||||
package ent
|
|
||||||
|
|
||||||
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema
|
|
211
ent/hook/hook.go
211
ent/hook/hook.go
@ -1,211 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package hook
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/mikestefanello/pagoda/ent"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The PasswordTokenFunc type is an adapter to allow the use of ordinary
|
|
||||||
// function as PasswordToken mutator.
|
|
||||||
type PasswordTokenFunc func(context.Context, *ent.PasswordTokenMutation) (ent.Value, error)
|
|
||||||
|
|
||||||
// Mutate calls f(ctx, m).
|
|
||||||
func (f PasswordTokenFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
|
||||||
if mv, ok := m.(*ent.PasswordTokenMutation); ok {
|
|
||||||
return f(ctx, mv)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PasswordTokenMutation", m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The UserFunc type is an adapter to allow the use of ordinary
|
|
||||||
// function as User mutator.
|
|
||||||
type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error)
|
|
||||||
|
|
||||||
// Mutate calls f(ctx, m).
|
|
||||||
func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
|
||||||
if mv, ok := m.(*ent.UserMutation); ok {
|
|
||||||
return f(ctx, mv)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Condition is a hook condition function.
|
|
||||||
type Condition func(context.Context, ent.Mutation) bool
|
|
||||||
|
|
||||||
// And groups conditions with the AND operator.
|
|
||||||
func And(first, second Condition, rest ...Condition) Condition {
|
|
||||||
return func(ctx context.Context, m ent.Mutation) bool {
|
|
||||||
if !first(ctx, m) || !second(ctx, m) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, cond := range rest {
|
|
||||||
if !cond(ctx, m) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Or groups conditions with the OR operator.
|
|
||||||
func Or(first, second Condition, rest ...Condition) Condition {
|
|
||||||
return func(ctx context.Context, m ent.Mutation) bool {
|
|
||||||
if first(ctx, m) || second(ctx, m) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for _, cond := range rest {
|
|
||||||
if cond(ctx, m) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Not negates a given condition.
|
|
||||||
func Not(cond Condition) Condition {
|
|
||||||
return func(ctx context.Context, m ent.Mutation) bool {
|
|
||||||
return !cond(ctx, m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasOp is a condition testing mutation operation.
|
|
||||||
func HasOp(op ent.Op) Condition {
|
|
||||||
return func(_ context.Context, m ent.Mutation) bool {
|
|
||||||
return m.Op().Is(op)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasAddedFields is a condition validating `.AddedField` on fields.
|
|
||||||
func HasAddedFields(field string, fields ...string) Condition {
|
|
||||||
return func(_ context.Context, m ent.Mutation) bool {
|
|
||||||
if _, exists := m.AddedField(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
if _, exists := m.AddedField(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasClearedFields is a condition validating `.FieldCleared` on fields.
|
|
||||||
func HasClearedFields(field string, fields ...string) Condition {
|
|
||||||
return func(_ context.Context, m ent.Mutation) bool {
|
|
||||||
if exists := m.FieldCleared(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
if exists := m.FieldCleared(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasFields is a condition validating `.Field` on fields.
|
|
||||||
func HasFields(field string, fields ...string) Condition {
|
|
||||||
return func(_ context.Context, m ent.Mutation) bool {
|
|
||||||
if _, exists := m.Field(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
if _, exists := m.Field(field); !exists {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If executes the given hook under condition.
|
|
||||||
//
|
|
||||||
// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
|
|
||||||
func If(hk ent.Hook, cond Condition) ent.Hook {
|
|
||||||
return func(next ent.Mutator) ent.Mutator {
|
|
||||||
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
|
||||||
if cond(ctx, m) {
|
|
||||||
return hk(next).Mutate(ctx, m)
|
|
||||||
}
|
|
||||||
return next.Mutate(ctx, m)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// On executes the given hook only for the given operation.
|
|
||||||
//
|
|
||||||
// hook.On(Log, ent.Delete|ent.Create)
|
|
||||||
func On(hk ent.Hook, op ent.Op) ent.Hook {
|
|
||||||
return If(hk, HasOp(op))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unless skips the given hook only for the given operation.
|
|
||||||
//
|
|
||||||
// hook.Unless(Log, ent.Update|ent.UpdateOne)
|
|
||||||
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
|
|
||||||
return If(hk, Not(HasOp(op)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FixedError is a hook returning a fixed error.
|
|
||||||
func FixedError(err error) ent.Hook {
|
|
||||||
return func(ent.Mutator) ent.Mutator {
|
|
||||||
return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) {
|
|
||||||
return nil, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject returns a hook that rejects all operations that match op.
|
|
||||||
//
|
|
||||||
// func (T) Hooks() []ent.Hook {
|
|
||||||
// return []ent.Hook{
|
|
||||||
// Reject(ent.Delete|ent.Update),
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
func Reject(op ent.Op) ent.Hook {
|
|
||||||
hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
|
|
||||||
return On(hk, op)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Chain acts as a list of hooks and is effectively immutable.
|
|
||||||
// Once created, it will always hold the same set of hooks in the same order.
|
|
||||||
type Chain struct {
|
|
||||||
hooks []ent.Hook
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewChain creates a new chain of hooks.
|
|
||||||
func NewChain(hooks ...ent.Hook) Chain {
|
|
||||||
return Chain{append([]ent.Hook(nil), hooks...)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hook chains the list of hooks and returns the final hook.
|
|
||||||
func (c Chain) Hook() ent.Hook {
|
|
||||||
return func(mutator ent.Mutator) ent.Mutator {
|
|
||||||
for i := len(c.hooks) - 1; i >= 0; i-- {
|
|
||||||
mutator = c.hooks[i](mutator)
|
|
||||||
}
|
|
||||||
return mutator
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append extends a chain, adding the specified hook
|
|
||||||
// as the last ones in the mutation flow.
|
|
||||||
func (c Chain) Append(hooks ...ent.Hook) Chain {
|
|
||||||
newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
|
|
||||||
newHooks = append(newHooks, c.hooks...)
|
|
||||||
newHooks = append(newHooks, hooks...)
|
|
||||||
return Chain{newHooks}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extend extends a chain, adding the specified chain
|
|
||||||
// as the last ones in the mutation flow.
|
|
||||||
func (c Chain) Extend(chain Chain) Chain {
|
|
||||||
return c.Append(chain.hooks...)
|
|
||||||
}
|
|
@ -1,64 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package migrate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect"
|
|
||||||
"entgo.io/ent/dialect/sql/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// WithGlobalUniqueID sets the universal ids options to the migration.
|
|
||||||
// If this option is enabled, ent migration will allocate a 1<<32 range
|
|
||||||
// for the ids of each entity (table).
|
|
||||||
// Note that this option cannot be applied on tables that already exist.
|
|
||||||
WithGlobalUniqueID = schema.WithGlobalUniqueID
|
|
||||||
// WithDropColumn sets the drop column option to the migration.
|
|
||||||
// If this option is enabled, ent migration will drop old columns
|
|
||||||
// that were used for both fields and edges. This defaults to false.
|
|
||||||
WithDropColumn = schema.WithDropColumn
|
|
||||||
// WithDropIndex sets the drop index option to the migration.
|
|
||||||
// If this option is enabled, ent migration will drop old indexes
|
|
||||||
// that were defined in the schema. This defaults to false.
|
|
||||||
// Note that unique constraints are defined using `UNIQUE INDEX`,
|
|
||||||
// and therefore, it's recommended to enable this option to get more
|
|
||||||
// flexibility in the schema changes.
|
|
||||||
WithDropIndex = schema.WithDropIndex
|
|
||||||
// WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true.
|
|
||||||
WithForeignKeys = schema.WithForeignKeys
|
|
||||||
)
|
|
||||||
|
|
||||||
// Schema is the API for creating, migrating and dropping a schema.
|
|
||||||
type Schema struct {
|
|
||||||
drv dialect.Driver
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSchema creates a new schema client.
|
|
||||||
func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} }
|
|
||||||
|
|
||||||
// Create creates all schema resources.
|
|
||||||
func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error {
|
|
||||||
return Create(ctx, s, Tables, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create creates all table resources using the given schema driver.
|
|
||||||
func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error {
|
|
||||||
migrate, err := schema.NewMigrate(s.drv, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("ent/migrate: %w", err)
|
|
||||||
}
|
|
||||||
return migrate.Create(ctx, tables...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteTo writes the schema changes to w instead of running them against the database.
|
|
||||||
//
|
|
||||||
// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
|
|
||||||
// log.Fatal(err)
|
|
||||||
// }
|
|
||||||
func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error {
|
|
||||||
return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...)
|
|
||||||
}
|
|
@ -1,56 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package migrate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"entgo.io/ent/dialect/sql/schema"
|
|
||||||
"entgo.io/ent/schema/field"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// PasswordTokensColumns holds the columns for the "password_tokens" table.
|
|
||||||
PasswordTokensColumns = []*schema.Column{
|
|
||||||
{Name: "id", Type: field.TypeInt, Increment: true},
|
|
||||||
{Name: "hash", Type: field.TypeString},
|
|
||||||
{Name: "created_at", Type: field.TypeTime},
|
|
||||||
{Name: "password_token_user", Type: field.TypeInt},
|
|
||||||
}
|
|
||||||
// PasswordTokensTable holds the schema information for the "password_tokens" table.
|
|
||||||
PasswordTokensTable = &schema.Table{
|
|
||||||
Name: "password_tokens",
|
|
||||||
Columns: PasswordTokensColumns,
|
|
||||||
PrimaryKey: []*schema.Column{PasswordTokensColumns[0]},
|
|
||||||
ForeignKeys: []*schema.ForeignKey{
|
|
||||||
{
|
|
||||||
Symbol: "password_tokens_users_user",
|
|
||||||
Columns: []*schema.Column{PasswordTokensColumns[3]},
|
|
||||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
|
||||||
OnDelete: schema.NoAction,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
// UsersColumns holds the columns for the "users" table.
|
|
||||||
UsersColumns = []*schema.Column{
|
|
||||||
{Name: "id", Type: field.TypeInt, Increment: true},
|
|
||||||
{Name: "name", Type: field.TypeString},
|
|
||||||
{Name: "email", Type: field.TypeString, Unique: true},
|
|
||||||
{Name: "password", Type: field.TypeString},
|
|
||||||
{Name: "verified", Type: field.TypeBool, Default: false},
|
|
||||||
{Name: "created_at", Type: field.TypeTime},
|
|
||||||
}
|
|
||||||
// UsersTable holds the schema information for the "users" table.
|
|
||||||
UsersTable = &schema.Table{
|
|
||||||
Name: "users",
|
|
||||||
Columns: UsersColumns,
|
|
||||||
PrimaryKey: []*schema.Column{UsersColumns[0]},
|
|
||||||
}
|
|
||||||
// Tables holds all the tables in the schema.
|
|
||||||
Tables = []*schema.Table{
|
|
||||||
PasswordTokensTable,
|
|
||||||
UsersTable,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
PasswordTokensTable.ForeignKeys[0].RefTable = UsersTable
|
|
||||||
}
|
|
1112
ent/mutation.go
1112
ent/mutation.go
File diff suppressed because it is too large
Load Diff
157 ent/passwordtoken.go
@ -1,157 +0,0 @@
Deleted in full: the generated model file ("Code generated by ent, DO NOT EDIT") for the PasswordToken entity. It declared the PasswordToken struct (ID; Hash, serialized as json:"-"; CreatedAt; Edges; the password_token_user foreign key; selectValues), the PasswordTokenEdges struct with its User edge and the UserOrErr accessor, the scanValues/assignValues helpers that map sql.Rows columns onto the struct, and the Value, QueryUser, Update, Unwrap and String methods, along with the PasswordTokens slice type.
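A hedged usage sketch (not part of the diff) of the model API described above: load a token with its user edge eager-loaded and read it back through Edges.UserOrErr. The helper name, the package name example, and the client/ctx values are assumptions for illustration only.

package example

import (
	"context"

	"github.com/mikestefanello/pagoda/ent"
	"github.com/mikestefanello/pagoda/ent/passwordtoken"
)

// tokenOwner returns the user attached to a password token, relying on the
// generated WithUser eager-loading and the Edges.UserOrErr accessor.
func tokenOwner(ctx context.Context, client *ent.Client, id int) (*ent.User, error) {
	pt, err := client.PasswordToken.Query().
		Where(passwordtoken.ID(id)).
		WithUser().
		Only(ctx)
	if err != nil {
		return nil, err
	}
	return pt.Edges.UserOrErr()
}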
99 ent/passwordtoken/passwordtoken.go
@ -1,99 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package passwordtoken

import (
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
)

const (
	// Label holds the string label denoting the passwordtoken type in the database.
	Label = "password_token"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldHash holds the string denoting the hash field in the database.
	FieldHash = "hash"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the passwordtoken in the database.
	Table = "password_tokens"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "password_tokens"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "password_token_user"
)

// Columns holds all SQL columns for passwordtoken fields.
var Columns = []string{
	FieldID,
	FieldHash,
	FieldCreatedAt,
}

// ForeignKeys holds the SQL foreign-keys that are owned by the "password_tokens"
// table and are not defined as standalone fields in the schema.
var ForeignKeys = []string{
	"password_token_user",
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	for i := range ForeignKeys {
		if column == ForeignKeys[i] {
			return true
		}
	}
	return false
}

var (
	// HashValidator is a validator for the "hash" field. It is called by the builders before save.
	HashValidator func(string) error
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
)

// OrderOption defines the ordering options for the PasswordToken queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByHash orders the results by the hash field.
func ByHash(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldHash, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, UserTable, UserColumn),
	)
}
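A small sketch (assumed helper, not from the diff) of the order options above in use: ByCreatedAt combined with sql.OrderDesc from entgo.io/ent/dialect/sql returns the newest tokens first. The helper and package names are illustrative placeholders.

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"

	"github.com/mikestefanello/pagoda/ent"
	"github.com/mikestefanello/pagoda/ent/passwordtoken"
)

// latestTokens lists the n most recently created password tokens.
func latestTokens(ctx context.Context, client *ent.Client, n int) ([]*ent.PasswordToken, error) {
	return client.PasswordToken.Query().
		Order(passwordtoken.ByCreatedAt(sql.OrderDesc())).
		Limit(n).
		All(ctx)
}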
209 ent/passwordtoken/where.go
@ -1,209 +0,0 @@
Deleted in full: the generated predicate file for the PasswordToken entity. It provided the ID predicates (ID, IDEQ, IDNEQ, IDIn, IDNotIn, IDGT, IDGTE, IDLT, IDLTE), the Hash and CreatedAt field predicates (EQ, NEQ, In, NotIn, GT, GTE, LT, LTE, plus Contains, HasPrefix, HasSuffix, EqualFold and ContainsFold for Hash), the HasUser and HasUserWith edge predicates, and the And, Or and Not combinators.
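An illustrative lookup (all names assumed, not taken from the repository) built from the predicates listed above: match on the stored hash, reject tokens older than a cutoff, and restrict to a specific user via the edge predicate.

package example

import (
	"context"
	"time"

	"github.com/mikestefanello/pagoda/ent"
	"github.com/mikestefanello/pagoda/ent/passwordtoken"
	"github.com/mikestefanello/pagoda/ent/user"
)

// validToken fetches a non-expired token for a given user and hash value.
func validToken(ctx context.Context, client *ent.Client, userID int, hash string, maxAge time.Duration) (*ent.PasswordToken, error) {
	return client.PasswordToken.Query().
		Where(
			passwordtoken.Hash(hash),
			passwordtoken.CreatedAtGTE(time.Now().Add(-maxAge)),
			passwordtoken.HasUserWith(user.ID(userID)),
		).
		Only(ctx)
}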
252 ent/passwordtoken_create.go
@ -1,252 +0,0 @@
Deleted in full: the generated create builders for the PasswordToken entity. PasswordTokenCreate exposed SetHash, SetCreatedAt/SetNillableCreatedAt, SetUserID/SetUser, Mutation, Save/SaveX and Exec/ExecX; defaults() applied passwordtoken.DefaultCreatedAt, check() enforced the required hash (via passwordtoken.HashValidator), created_at and user edge, and sqlSave/createSpec wired the M2O user edge. PasswordTokenCreateBulk offered the batched Save/SaveX and Exec/ExecX equivalents.
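A short sketch (assumed wrapper, not taken from the repository) of the create builder above: required fields are enforced by check(), and created_at falls back to the generated default when unset.

package example

import (
	"context"

	"github.com/mikestefanello/pagoda/ent"
)

// issueToken stores a new password token hash for the given user; created_at
// is filled in by the generated DefaultCreatedAt.
func issueToken(ctx context.Context, client *ent.Client, u *ent.User, hash string) (*ent.PasswordToken, error) {
	return client.PasswordToken.
		Create().
		SetHash(hash).
		SetUser(u).
		Save(ctx)
}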
88 ent/passwordtoken_delete.go
@ -1,88 +0,0 @@
Deleted in full: the generated delete builders for the PasswordToken entity. PasswordTokenDelete offered Where, Exec/ExecX and a sqlExec implementation built on sqlgraph.DeleteNodes; PasswordTokenDeleteOne wrapped it for single-row deletes and returned a NotFoundError when no row matched.
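A sketch (helper name assumed) of the delete builder above combined with one of the where.go predicates, for example purging tokens past an expiration window.

package example

import (
	"context"
	"time"

	"github.com/mikestefanello/pagoda/ent"
	"github.com/mikestefanello/pagoda/ent/passwordtoken"
)

// purgeExpiredTokens removes all tokens created before the cutoff and returns
// how many rows were deleted.
func purgeExpiredTokens(ctx context.Context, client *ent.Client, cutoff time.Time) (int, error) {
	return client.PasswordToken.
		Delete().
		Where(passwordtoken.CreatedAtLT(cutoff)).
		Exec(ctx)
}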
613 ent/passwordtoken_query.go
@ -1,613 +0,0 @@
Deleted in full: the generated query builder for the PasswordToken entity. PasswordTokenQuery carried Where, Limit, Offset, Unique and Order, the QueryUser edge traversal, the First/FirstID, Only/OnlyID, All, IDs, Count and Exist helpers (each with a panicking X variant), Clone, WithUser eager-loading, and GroupBy, Select and Aggregate, backed by the internal prepareQuery, sqlAll/loadUser, sqlCount, querySpec and sqlQuery plumbing. PasswordTokenGroupBy and PasswordTokenSelect implemented the corresponding Scan and sqlScan logic.
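One more hedged sketch (illustrative names only) of the query builder summarized above: Exist gives a cheap check for whether a user has any outstanding tokens without loading rows.

package example

import (
	"context"

	"github.com/mikestefanello/pagoda/ent"
	"github.com/mikestefanello/pagoda/ent/passwordtoken"
	"github.com/mikestefanello/pagoda/ent/user"
)

// hasToken reports whether any password token exists for the given user.
func hasToken(ctx context.Context, client *ent.Client, userID int) (bool, error) {
	return client.PasswordToken.Query().
		Where(passwordtoken.HasUserWith(user.ID(userID))).
		Exist(ctx)
}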
@@ -1,369 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "context"
    "errors"
    "fmt"
    "time"

    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
    "entgo.io/ent/schema/field"
    "github.com/mikestefanello/pagoda/ent/passwordtoken"
    "github.com/mikestefanello/pagoda/ent/predicate"
    "github.com/mikestefanello/pagoda/ent/user"
)

// PasswordTokenUpdate is the builder for updating PasswordToken entities.
type PasswordTokenUpdate struct {
    config
    hooks    []Hook
    mutation *PasswordTokenMutation
}

// Where appends a list predicates to the PasswordTokenUpdate builder.
func (ptu *PasswordTokenUpdate) Where(ps ...predicate.PasswordToken) *PasswordTokenUpdate {
    ptu.mutation.Where(ps...)
    return ptu
}

// SetHash sets the "hash" field.
func (ptu *PasswordTokenUpdate) SetHash(s string) *PasswordTokenUpdate {
    ptu.mutation.SetHash(s)
    return ptu
}

// SetNillableHash sets the "hash" field if the given value is not nil.
func (ptu *PasswordTokenUpdate) SetNillableHash(s *string) *PasswordTokenUpdate {
    if s != nil {
        ptu.SetHash(*s)
    }
    return ptu
}

// SetCreatedAt sets the "created_at" field.
func (ptu *PasswordTokenUpdate) SetCreatedAt(t time.Time) *PasswordTokenUpdate {
    ptu.mutation.SetCreatedAt(t)
    return ptu
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (ptu *PasswordTokenUpdate) SetNillableCreatedAt(t *time.Time) *PasswordTokenUpdate {
    if t != nil {
        ptu.SetCreatedAt(*t)
    }
    return ptu
}

// SetUserID sets the "user" edge to the User entity by ID.
func (ptu *PasswordTokenUpdate) SetUserID(id int) *PasswordTokenUpdate {
    ptu.mutation.SetUserID(id)
    return ptu
}

// SetUser sets the "user" edge to the User entity.
func (ptu *PasswordTokenUpdate) SetUser(u *User) *PasswordTokenUpdate {
    return ptu.SetUserID(u.ID)
}

// Mutation returns the PasswordTokenMutation object of the builder.
func (ptu *PasswordTokenUpdate) Mutation() *PasswordTokenMutation {
    return ptu.mutation
}

// ClearUser clears the "user" edge to the User entity.
func (ptu *PasswordTokenUpdate) ClearUser() *PasswordTokenUpdate {
    ptu.mutation.ClearUser()
    return ptu
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (ptu *PasswordTokenUpdate) Save(ctx context.Context) (int, error) {
    return withHooks(ctx, ptu.sqlSave, ptu.mutation, ptu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (ptu *PasswordTokenUpdate) SaveX(ctx context.Context) int {
    affected, err := ptu.Save(ctx)
    if err != nil {
        panic(err)
    }
    return affected
}

// Exec executes the query.
func (ptu *PasswordTokenUpdate) Exec(ctx context.Context) error {
    _, err := ptu.Save(ctx)
    return err
}

// ExecX is like Exec, but panics if an error occurs.
func (ptu *PasswordTokenUpdate) ExecX(ctx context.Context) {
    if err := ptu.Exec(ctx); err != nil {
        panic(err)
    }
}

// check runs all checks and user-defined validators on the builder.
func (ptu *PasswordTokenUpdate) check() error {
    if v, ok := ptu.mutation.Hash(); ok {
        if err := passwordtoken.HashValidator(v); err != nil {
            return &ValidationError{Name: "hash", err: fmt.Errorf(`ent: validator failed for field "PasswordToken.hash": %w`, err)}
        }
    }
    if _, ok := ptu.mutation.UserID(); ptu.mutation.UserCleared() && !ok {
        return errors.New(`ent: clearing a required unique edge "PasswordToken.user"`)
    }
    return nil
}

func (ptu *PasswordTokenUpdate) sqlSave(ctx context.Context) (n int, err error) {
    if err := ptu.check(); err != nil {
        return n, err
    }
    _spec := sqlgraph.NewUpdateSpec(passwordtoken.Table, passwordtoken.Columns, sqlgraph.NewFieldSpec(passwordtoken.FieldID, field.TypeInt))
    if ps := ptu.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector)
            }
        }
    }
    if value, ok := ptu.mutation.Hash(); ok {
        _spec.SetField(passwordtoken.FieldHash, field.TypeString, value)
    }
    if value, ok := ptu.mutation.CreatedAt(); ok {
        _spec.SetField(passwordtoken.FieldCreatedAt, field.TypeTime, value)
    }
    if ptu.mutation.UserCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: false,
            Table:   passwordtoken.UserTable,
            Columns: []string{passwordtoken.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := ptu.mutation.UserIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: false,
            Table:   passwordtoken.UserTable,
            Columns: []string{passwordtoken.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    if n, err = sqlgraph.UpdateNodes(ctx, ptu.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{passwordtoken.Label}
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{msg: err.Error(), wrap: err}
        }
        return 0, err
    }
    ptu.mutation.done = true
    return n, nil
}

// PasswordTokenUpdateOne is the builder for updating a single PasswordToken entity.
type PasswordTokenUpdateOne struct {
    config
    fields   []string
    hooks    []Hook
    mutation *PasswordTokenMutation
}

// SetHash sets the "hash" field.
func (ptuo *PasswordTokenUpdateOne) SetHash(s string) *PasswordTokenUpdateOne {
    ptuo.mutation.SetHash(s)
    return ptuo
}

// SetNillableHash sets the "hash" field if the given value is not nil.
func (ptuo *PasswordTokenUpdateOne) SetNillableHash(s *string) *PasswordTokenUpdateOne {
    if s != nil {
        ptuo.SetHash(*s)
    }
    return ptuo
}

// SetCreatedAt sets the "created_at" field.
func (ptuo *PasswordTokenUpdateOne) SetCreatedAt(t time.Time) *PasswordTokenUpdateOne {
    ptuo.mutation.SetCreatedAt(t)
    return ptuo
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (ptuo *PasswordTokenUpdateOne) SetNillableCreatedAt(t *time.Time) *PasswordTokenUpdateOne {
    if t != nil {
        ptuo.SetCreatedAt(*t)
    }
    return ptuo
}

// SetUserID sets the "user" edge to the User entity by ID.
func (ptuo *PasswordTokenUpdateOne) SetUserID(id int) *PasswordTokenUpdateOne {
    ptuo.mutation.SetUserID(id)
    return ptuo
}

// SetUser sets the "user" edge to the User entity.
func (ptuo *PasswordTokenUpdateOne) SetUser(u *User) *PasswordTokenUpdateOne {
    return ptuo.SetUserID(u.ID)
}

// Mutation returns the PasswordTokenMutation object of the builder.
func (ptuo *PasswordTokenUpdateOne) Mutation() *PasswordTokenMutation {
    return ptuo.mutation
}

// ClearUser clears the "user" edge to the User entity.
func (ptuo *PasswordTokenUpdateOne) ClearUser() *PasswordTokenUpdateOne {
    ptuo.mutation.ClearUser()
    return ptuo
}

// Where appends a list predicates to the PasswordTokenUpdate builder.
func (ptuo *PasswordTokenUpdateOne) Where(ps ...predicate.PasswordToken) *PasswordTokenUpdateOne {
    ptuo.mutation.Where(ps...)
    return ptuo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (ptuo *PasswordTokenUpdateOne) Select(field string, fields ...string) *PasswordTokenUpdateOne {
    ptuo.fields = append([]string{field}, fields...)
    return ptuo
}

// Save executes the query and returns the updated PasswordToken entity.
func (ptuo *PasswordTokenUpdateOne) Save(ctx context.Context) (*PasswordToken, error) {
    return withHooks(ctx, ptuo.sqlSave, ptuo.mutation, ptuo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (ptuo *PasswordTokenUpdateOne) SaveX(ctx context.Context) *PasswordToken {
    node, err := ptuo.Save(ctx)
    if err != nil {
        panic(err)
    }
    return node
}

// Exec executes the query on the entity.
func (ptuo *PasswordTokenUpdateOne) Exec(ctx context.Context) error {
    _, err := ptuo.Save(ctx)
    return err
}

// ExecX is like Exec, but panics if an error occurs.
func (ptuo *PasswordTokenUpdateOne) ExecX(ctx context.Context) {
    if err := ptuo.Exec(ctx); err != nil {
        panic(err)
    }
}

// check runs all checks and user-defined validators on the builder.
func (ptuo *PasswordTokenUpdateOne) check() error {
    if v, ok := ptuo.mutation.Hash(); ok {
        if err := passwordtoken.HashValidator(v); err != nil {
            return &ValidationError{Name: "hash", err: fmt.Errorf(`ent: validator failed for field "PasswordToken.hash": %w`, err)}
        }
    }
    if _, ok := ptuo.mutation.UserID(); ptuo.mutation.UserCleared() && !ok {
        return errors.New(`ent: clearing a required unique edge "PasswordToken.user"`)
    }
    return nil
}

func (ptuo *PasswordTokenUpdateOne) sqlSave(ctx context.Context) (_node *PasswordToken, err error) {
    if err := ptuo.check(); err != nil {
        return _node, err
    }
    _spec := sqlgraph.NewUpdateSpec(passwordtoken.Table, passwordtoken.Columns, sqlgraph.NewFieldSpec(passwordtoken.FieldID, field.TypeInt))
    id, ok := ptuo.mutation.ID()
    if !ok {
        return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PasswordToken.id" for update`)}
    }
    _spec.Node.ID.Value = id
    if fields := ptuo.fields; len(fields) > 0 {
        _spec.Node.Columns = make([]string, 0, len(fields))
        _spec.Node.Columns = append(_spec.Node.Columns, passwordtoken.FieldID)
        for _, f := range fields {
            if !passwordtoken.ValidColumn(f) {
                return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
            }
            if f != passwordtoken.FieldID {
                _spec.Node.Columns = append(_spec.Node.Columns, f)
            }
        }
    }
    if ps := ptuo.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector)
            }
        }
    }
    if value, ok := ptuo.mutation.Hash(); ok {
        _spec.SetField(passwordtoken.FieldHash, field.TypeString, value)
    }
    if value, ok := ptuo.mutation.CreatedAt(); ok {
        _spec.SetField(passwordtoken.FieldCreatedAt, field.TypeTime, value)
    }
    if ptuo.mutation.UserCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: false,
            Table:   passwordtoken.UserTable,
            Columns: []string{passwordtoken.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := ptuo.mutation.UserIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: false,
            Table:   passwordtoken.UserTable,
            Columns: []string{passwordtoken.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    _node = &PasswordToken{config: ptuo.config}
    _spec.Assign = _node.assignValues
    _spec.ScanValues = _node.scanValues
    if err = sqlgraph.UpdateNode(ctx, ptuo.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{passwordtoken.Label}
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{msg: err.Error(), wrap: err}
        }
        return nil, err
    }
    ptuo.mutation.done = true
    return _node, nil
}
@@ -1,13 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package predicate

import (
    "entgo.io/ent/dialect/sql"
)

// PasswordToken is the predicate function for passwordtoken builders.
type PasswordToken func(*sql.Selector)

// User is the predicate function for user builders.
type User func(*sql.Selector)
@@ -1,5 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package ent

// The schema-stitching logic is generated in github.com/mikestefanello/pagoda/ent/runtime/runtime.go
@@ -1,56 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package runtime

import (
    "time"

    "github.com/mikestefanello/pagoda/ent/passwordtoken"
    "github.com/mikestefanello/pagoda/ent/schema"
    "github.com/mikestefanello/pagoda/ent/user"
)

// The init function reads all schema descriptors with runtime code
// (default values, validators, hooks and policies) and stitches it
// to their package variables.
func init() {
    passwordtokenFields := schema.PasswordToken{}.Fields()
    _ = passwordtokenFields
    // passwordtokenDescHash is the schema descriptor for hash field.
    passwordtokenDescHash := passwordtokenFields[0].Descriptor()
    // passwordtoken.HashValidator is a validator for the "hash" field. It is called by the builders before save.
    passwordtoken.HashValidator = passwordtokenDescHash.Validators[0].(func(string) error)
    // passwordtokenDescCreatedAt is the schema descriptor for created_at field.
    passwordtokenDescCreatedAt := passwordtokenFields[1].Descriptor()
    // passwordtoken.DefaultCreatedAt holds the default value on creation for the created_at field.
    passwordtoken.DefaultCreatedAt = passwordtokenDescCreatedAt.Default.(func() time.Time)
    userHooks := schema.User{}.Hooks()
    user.Hooks[0] = userHooks[0]
    userFields := schema.User{}.Fields()
    _ = userFields
    // userDescName is the schema descriptor for name field.
    userDescName := userFields[0].Descriptor()
    // user.NameValidator is a validator for the "name" field. It is called by the builders before save.
    user.NameValidator = userDescName.Validators[0].(func(string) error)
    // userDescEmail is the schema descriptor for email field.
    userDescEmail := userFields[1].Descriptor()
    // user.EmailValidator is a validator for the "email" field. It is called by the builders before save.
    user.EmailValidator = userDescEmail.Validators[0].(func(string) error)
    // userDescPassword is the schema descriptor for password field.
    userDescPassword := userFields[2].Descriptor()
    // user.PasswordValidator is a validator for the "password" field. It is called by the builders before save.
    user.PasswordValidator = userDescPassword.Validators[0].(func(string) error)
    // userDescVerified is the schema descriptor for verified field.
    userDescVerified := userFields[3].Descriptor()
    // user.DefaultVerified holds the default value on creation for the verified field.
    user.DefaultVerified = userDescVerified.Default.(bool)
    // userDescCreatedAt is the schema descriptor for created_at field.
    userDescCreatedAt := userFields[4].Descriptor()
    // user.DefaultCreatedAt holds the default value on creation for the created_at field.
    user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)
}

const (
    Version = "v0.12.5"                                         // Version of ent codegen.
    Sum     = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen.
)
@@ -1,34 +0,0 @@
package schema

import (
    "time"

    "entgo.io/ent"
    "entgo.io/ent/schema/edge"
    "entgo.io/ent/schema/field"
)

// PasswordToken holds the schema definition for the PasswordToken entity.
type PasswordToken struct {
    ent.Schema
}

// Fields of the PasswordToken.
func (PasswordToken) Fields() []ent.Field {
    return []ent.Field{
        field.String("hash").
            Sensitive().
            NotEmpty(),
        field.Time("created_at").
            Default(time.Now),
    }
}

// Edges of the PasswordToken.
func (PasswordToken) Edges() []ent.Edge {
    return []ent.Edge{
        edge.To("user", User.Type).
            Required().
            Unique(),
    }
}
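Because the schema above marks the "user" edge as both Required and Unique, every PasswordToken must be created with exactly one owning user. A hedged sketch of what creating one might look like through the generated builders; the `client`, `hashedToken`, and `usr` variables are assumptions, not code from this diff.

```go
// Hypothetical usage sketch: persist a hashed reset token for a user.
pt, err := client.PasswordToken.
	Create().
	SetHash(hashedToken). // assumed to be a hash computed elsewhere; stored, never the raw token
	SetUser(usr).         // required, unique edge defined in the schema above
	Save(ctx)
```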
@@ -1,64 +0,0 @@
package schema

import (
    "context"
    "strings"
    "time"

    ge "github.com/mikestefanello/pagoda/ent"
    "github.com/mikestefanello/pagoda/ent/hook"

    "entgo.io/ent"
    "entgo.io/ent/schema/edge"
    "entgo.io/ent/schema/field"
)

// User holds the schema definition for the User entity.
type User struct {
    ent.Schema
}

// Fields of the User.
func (User) Fields() []ent.Field {
    return []ent.Field{
        field.String("name").
            NotEmpty(),
        field.String("email").
            NotEmpty().
            Unique(),
        field.String("password").
            Sensitive().
            NotEmpty(),
        field.Bool("verified").
            Default(false),
        field.Time("created_at").
            Default(time.Now).
            Immutable(),
    }
}

// Edges of the User.
func (User) Edges() []ent.Edge {
    return []ent.Edge{
        edge.From("owner", PasswordToken.Type).
            Ref("user"),
    }
}

// Hooks of the User.
func (User) Hooks() []ent.Hook {
    return []ent.Hook{
        hook.On(
            func(next ent.Mutator) ent.Mutator {
                return hook.UserFunc(func(ctx context.Context, m *ge.UserMutation) (ent.Value, error) {
                    if v, exists := m.Email(); exists {
                        m.SetEmail(strings.ToLower(v))
                    }
                    return next.Mutate(ctx, m)
                })
            },
            // Limit the hook only for these operations.
            ent.OpCreate|ent.OpUpdate|ent.OpUpdateOne,
        ),
    }
}
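The hook registered above lowercases the email on create and update, so lookups can rely on a canonical form. A small hedged sketch of the observable effect; the `client`, `ctx`, and `hashedPassword` names are assumptions for illustration only.

```go
// Hypothetical sketch: the mutation hook normalizes the email before save.
u, err := client.User.
	Create().
	SetName("Jane").
	SetEmail("Jane@Example.COM"). // stored as "jane@example.com" by the hook above
	SetPassword(hashedPassword).  // assumed to be hashed by the application layer
	Save(ctx)
```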
213 ent/tx.go
@@ -1,213 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "context"
    "sync"

    "entgo.io/ent/dialect"
)

// Tx is a transactional client that is created by calling Client.Tx().
type Tx struct {
    config
    // PasswordToken is the client for interacting with the PasswordToken builders.
    PasswordToken *PasswordTokenClient
    // User is the client for interacting with the User builders.
    User *UserClient

    // lazily loaded.
    client     *Client
    clientOnce sync.Once
    // ctx lives for the life of the transaction. It is
    // the same context used by the underlying connection.
    ctx context.Context
}

type (
    // Committer is the interface that wraps the Commit method.
    Committer interface {
        Commit(context.Context, *Tx) error
    }

    // The CommitFunc type is an adapter to allow the use of ordinary
    // function as a Committer. If f is a function with the appropriate
    // signature, CommitFunc(f) is a Committer that calls f.
    CommitFunc func(context.Context, *Tx) error

    // CommitHook defines the "commit middleware". A function that gets a Committer
    // and returns a Committer. For example:
    //
    //  hook := func(next ent.Committer) ent.Committer {
    //      return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
    //          // Do some stuff before.
    //          if err := next.Commit(ctx, tx); err != nil {
    //              return err
    //          }
    //          // Do some stuff after.
    //          return nil
    //      })
    //  }
    //
    CommitHook func(Committer) Committer
)

// Commit calls f(ctx, m).
func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error {
    return f(ctx, tx)
}

// Commit commits the transaction.
func (tx *Tx) Commit() error {
    txDriver := tx.config.driver.(*txDriver)
    var fn Committer = CommitFunc(func(context.Context, *Tx) error {
        return txDriver.tx.Commit()
    })
    txDriver.mu.Lock()
    hooks := append([]CommitHook(nil), txDriver.onCommit...)
    txDriver.mu.Unlock()
    for i := len(hooks) - 1; i >= 0; i-- {
        fn = hooks[i](fn)
    }
    return fn.Commit(tx.ctx, tx)
}

// OnCommit adds a hook to call on commit.
func (tx *Tx) OnCommit(f CommitHook) {
    txDriver := tx.config.driver.(*txDriver)
    txDriver.mu.Lock()
    txDriver.onCommit = append(txDriver.onCommit, f)
    txDriver.mu.Unlock()
}

type (
    // Rollbacker is the interface that wraps the Rollback method.
    Rollbacker interface {
        Rollback(context.Context, *Tx) error
    }

    // The RollbackFunc type is an adapter to allow the use of ordinary
    // function as a Rollbacker. If f is a function with the appropriate
    // signature, RollbackFunc(f) is a Rollbacker that calls f.
    RollbackFunc func(context.Context, *Tx) error

    // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker
    // and returns a Rollbacker. For example:
    //
    //  hook := func(next ent.Rollbacker) ent.Rollbacker {
    //      return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error {
    //          // Do some stuff before.
    //          if err := next.Rollback(ctx, tx); err != nil {
    //              return err
    //          }
    //          // Do some stuff after.
    //          return nil
    //      })
    //  }
    //
    RollbackHook func(Rollbacker) Rollbacker
)

// Rollback calls f(ctx, m).
func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error {
    return f(ctx, tx)
}

// Rollback rollbacks the transaction.
func (tx *Tx) Rollback() error {
    txDriver := tx.config.driver.(*txDriver)
    var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
        return txDriver.tx.Rollback()
    })
    txDriver.mu.Lock()
    hooks := append([]RollbackHook(nil), txDriver.onRollback...)
    txDriver.mu.Unlock()
    for i := len(hooks) - 1; i >= 0; i-- {
        fn = hooks[i](fn)
    }
    return fn.Rollback(tx.ctx, tx)
}

// OnRollback adds a hook to call on rollback.
func (tx *Tx) OnRollback(f RollbackHook) {
    txDriver := tx.config.driver.(*txDriver)
    txDriver.mu.Lock()
    txDriver.onRollback = append(txDriver.onRollback, f)
    txDriver.mu.Unlock()
}

// Client returns a Client that binds to current transaction.
func (tx *Tx) Client() *Client {
    tx.clientOnce.Do(func() {
        tx.client = &Client{config: tx.config}
        tx.client.init()
    })
    return tx.client
}

func (tx *Tx) init() {
    tx.PasswordToken = NewPasswordTokenClient(tx.config)
    tx.User = NewUserClient(tx.config)
}

// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
// The idea is to support transactions without adding any extra code to the builders.
// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance.
// Commit and Rollback are nop for the internal builders and the user must call one
// of them in order to commit or rollback the transaction.
//
// If a closed transaction is embedded in one of the generated entities, and the entity
// applies a query, for example: PasswordToken.QueryXXX(), the query will be executed
// through the driver which created this transaction.
//
// Note that txDriver is not goroutine safe.
type txDriver struct {
    // the driver we started the transaction from.
    drv dialect.Driver
    // tx is the underlying transaction.
    tx dialect.Tx
    // completion hooks.
    mu         sync.Mutex
    onCommit   []CommitHook
    onRollback []RollbackHook
}

// newTx creates a new transactional driver.
func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) {
    tx, err := drv.Tx(ctx)
    if err != nil {
        return nil, err
    }
    return &txDriver{tx: tx, drv: drv}, nil
}

// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls
// from the internal builders. Should be called only by the internal builders.
func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }

// Dialect returns the dialect of the driver we started the transaction from.
func (tx *txDriver) Dialect() string { return tx.drv.Dialect() }

// Close is a nop close.
func (*txDriver) Close() error { return nil }

// Commit is a nop commit for the internal builders.
// User must call `Tx.Commit` in order to commit the transaction.
func (*txDriver) Commit() error { return nil }

// Rollback is a nop rollback for the internal builders.
// User must call `Tx.Rollback` in order to rollback the transaction.
func (*txDriver) Rollback() error { return nil }

// Exec calls tx.Exec.
func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error {
    return tx.tx.Exec(ctx, query, args, v)
}

// Query calls tx.Query.
func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
    return tx.tx.Query(ctx, query, args, v)
}

var _ dialect.Driver = (*txDriver)(nil)
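As the comments above note, a Tx is obtained from Client.Tx() and exposes per-entity clients that run inside that transaction. A hedged sketch of the usual commit/rollback pattern from application code; the surrounding function and the bulk-delete call are assumptions, not part of this diff.

```go
// Hypothetical usage sketch of the transactional client.
tx, err := client.Tx(ctx)
if err != nil {
	return err
}
if _, err := tx.PasswordToken.Delete().Exec(ctx); err != nil {
	_ = tx.Rollback() // best-effort rollback on failure
	return err
}
return tx.Commit()
```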
177 ent/user.go
@@ -1,177 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "fmt"
    "strings"
    "time"

    "entgo.io/ent"
    "entgo.io/ent/dialect/sql"
    "github.com/mikestefanello/pagoda/ent/user"
)

// User is the model entity for the User schema.
type User struct {
    config `json:"-"`
    // ID of the ent.
    ID int `json:"id,omitempty"`
    // Name holds the value of the "name" field.
    Name string `json:"name,omitempty"`
    // Email holds the value of the "email" field.
    Email string `json:"email,omitempty"`
    // Password holds the value of the "password" field.
    Password string `json:"-"`
    // Verified holds the value of the "verified" field.
    Verified bool `json:"verified,omitempty"`
    // CreatedAt holds the value of the "created_at" field.
    CreatedAt time.Time `json:"created_at,omitempty"`
    // Edges holds the relations/edges for other nodes in the graph.
    // The values are being populated by the UserQuery when eager-loading is set.
    Edges        UserEdges `json:"edges"`
    selectValues sql.SelectValues
}

// UserEdges holds the relations/edges for other nodes in the graph.
type UserEdges struct {
    // Owner holds the value of the owner edge.
    Owner []*PasswordToken `json:"owner,omitempty"`
    // loadedTypes holds the information for reporting if a
    // type was loaded (or requested) in eager-loading or not.
    loadedTypes [1]bool
}

// OwnerOrErr returns the Owner value or an error if the edge
// was not loaded in eager-loading.
func (e UserEdges) OwnerOrErr() ([]*PasswordToken, error) {
    if e.loadedTypes[0] {
        return e.Owner, nil
    }
    return nil, &NotLoadedError{edge: "owner"}
}

// scanValues returns the types for scanning values from sql.Rows.
func (*User) scanValues(columns []string) ([]any, error) {
    values := make([]any, len(columns))
    for i := range columns {
        switch columns[i] {
        case user.FieldVerified:
            values[i] = new(sql.NullBool)
        case user.FieldID:
            values[i] = new(sql.NullInt64)
        case user.FieldName, user.FieldEmail, user.FieldPassword:
            values[i] = new(sql.NullString)
        case user.FieldCreatedAt:
            values[i] = new(sql.NullTime)
        default:
            values[i] = new(sql.UnknownType)
        }
    }
    return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the User fields.
func (u *User) assignValues(columns []string, values []any) error {
    if m, n := len(values), len(columns); m < n {
        return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
    }
    for i := range columns {
        switch columns[i] {
        case user.FieldID:
            value, ok := values[i].(*sql.NullInt64)
            if !ok {
                return fmt.Errorf("unexpected type %T for field id", value)
            }
            u.ID = int(value.Int64)
        case user.FieldName:
            if value, ok := values[i].(*sql.NullString); !ok {
                return fmt.Errorf("unexpected type %T for field name", values[i])
            } else if value.Valid {
                u.Name = value.String
            }
        case user.FieldEmail:
            if value, ok := values[i].(*sql.NullString); !ok {
                return fmt.Errorf("unexpected type %T for field email", values[i])
            } else if value.Valid {
                u.Email = value.String
            }
        case user.FieldPassword:
            if value, ok := values[i].(*sql.NullString); !ok {
                return fmt.Errorf("unexpected type %T for field password", values[i])
            } else if value.Valid {
                u.Password = value.String
            }
        case user.FieldVerified:
            if value, ok := values[i].(*sql.NullBool); !ok {
                return fmt.Errorf("unexpected type %T for field verified", values[i])
            } else if value.Valid {
                u.Verified = value.Bool
            }
        case user.FieldCreatedAt:
            if value, ok := values[i].(*sql.NullTime); !ok {
                return fmt.Errorf("unexpected type %T for field created_at", values[i])
            } else if value.Valid {
                u.CreatedAt = value.Time
            }
        default:
            u.selectValues.Set(columns[i], values[i])
        }
    }
    return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the User.
// This includes values selected through modifiers, order, etc.
func (u *User) Value(name string) (ent.Value, error) {
    return u.selectValues.Get(name)
}

// QueryOwner queries the "owner" edge of the User entity.
func (u *User) QueryOwner() *PasswordTokenQuery {
    return NewUserClient(u.config).QueryOwner(u)
}

// Update returns a builder for updating this User.
// Note that you need to call User.Unwrap() before calling this method if this User
// was returned from a transaction, and the transaction was committed or rolled back.
func (u *User) Update() *UserUpdateOne {
    return NewUserClient(u.config).UpdateOne(u)
}

// Unwrap unwraps the User entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (u *User) Unwrap() *User {
    _tx, ok := u.config.driver.(*txDriver)
    if !ok {
        panic("ent: User is not a transactional entity")
    }
    u.config.driver = _tx.drv
    return u
}

// String implements the fmt.Stringer.
func (u *User) String() string {
    var builder strings.Builder
    builder.WriteString("User(")
    builder.WriteString(fmt.Sprintf("id=%v, ", u.ID))
    builder.WriteString("name=")
    builder.WriteString(u.Name)
    builder.WriteString(", ")
    builder.WriteString("email=")
    builder.WriteString(u.Email)
    builder.WriteString(", ")
    builder.WriteString("password=<sensitive>")
    builder.WriteString(", ")
    builder.WriteString("verified=")
    builder.WriteString(fmt.Sprintf("%v", u.Verified))
    builder.WriteString(", ")
    builder.WriteString("created_at=")
    builder.WriteString(u.CreatedAt.Format(time.ANSIC))
    builder.WriteByte(')')
    return builder.String()
}

// Users is a parsable slice of User.
type Users []*User
132 ent/user/user.go
@@ -1,132 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package user

import (
    "time"

    "entgo.io/ent"
    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
)

const (
    // Label holds the string label denoting the user type in the database.
    Label = "user"
    // FieldID holds the string denoting the id field in the database.
    FieldID = "id"
    // FieldName holds the string denoting the name field in the database.
    FieldName = "name"
    // FieldEmail holds the string denoting the email field in the database.
    FieldEmail = "email"
    // FieldPassword holds the string denoting the password field in the database.
    FieldPassword = "password"
    // FieldVerified holds the string denoting the verified field in the database.
    FieldVerified = "verified"
    // FieldCreatedAt holds the string denoting the created_at field in the database.
    FieldCreatedAt = "created_at"
    // EdgeOwner holds the string denoting the owner edge name in mutations.
    EdgeOwner = "owner"
    // Table holds the table name of the user in the database.
    Table = "users"
    // OwnerTable is the table that holds the owner relation/edge.
    OwnerTable = "password_tokens"
    // OwnerInverseTable is the table name for the PasswordToken entity.
    // It exists in this package in order to avoid circular dependency with the "passwordtoken" package.
    OwnerInverseTable = "password_tokens"
    // OwnerColumn is the table column denoting the owner relation/edge.
    OwnerColumn = "password_token_user"
)

// Columns holds all SQL columns for user fields.
var Columns = []string{
    FieldID,
    FieldName,
    FieldEmail,
    FieldPassword,
    FieldVerified,
    FieldCreatedAt,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
    for i := range Columns {
        if column == Columns[i] {
            return true
        }
    }
    return false
}

// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
// import _ "github.com/mikestefanello/pagoda/ent/runtime"
var (
    Hooks [1]ent.Hook
    // NameValidator is a validator for the "name" field. It is called by the builders before save.
    NameValidator func(string) error
    // EmailValidator is a validator for the "email" field. It is called by the builders before save.
    EmailValidator func(string) error
    // PasswordValidator is a validator for the "password" field. It is called by the builders before save.
    PasswordValidator func(string) error
    // DefaultVerified holds the default value on creation for the "verified" field.
    DefaultVerified bool
    // DefaultCreatedAt holds the default value on creation for the "created_at" field.
    DefaultCreatedAt func() time.Time
)

// OrderOption defines the ordering options for the User queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByEmail orders the results by the email field.
func ByEmail(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldEmail, opts...).ToFunc()
}

// ByPassword orders the results by the password field.
func ByPassword(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldPassword, opts...).ToFunc()
}

// ByVerified orders the results by the verified field.
func ByVerified(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldVerified, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByOwnerCount orders the results by owner count.
func ByOwnerCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborsCount(s, newOwnerStep(), opts...)
    }
}

// ByOwner orders the results by owner terms.
func ByOwner(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), append([]sql.OrderTerm{term}, terms...)...)
    }
}
func newOwnerStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(OwnerInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.O2M, true, OwnerTable, OwnerColumn),
    )
}
@ -1,364 +0,0 @@
|
|||||||
// Code generated by ent, DO NOT EDIT.
|
|
||||||
|
|
||||||
package user
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"entgo.io/ent/dialect/sql"
|
|
||||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
|
||||||
"github.com/mikestefanello/pagoda/ent/predicate"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ID filters vertices based on their ID field.
|
|
||||||
func ID(id int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldID, id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDEQ applies the EQ predicate on the ID field.
|
|
||||||
func IDEQ(id int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldID, id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDNEQ applies the NEQ predicate on the ID field.
|
|
||||||
func IDNEQ(id int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNEQ(FieldID, id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDIn applies the In predicate on the ID field.
|
|
||||||
func IDIn(ids ...int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldIn(FieldID, ids...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDNotIn applies the NotIn predicate on the ID field.
|
|
||||||
func IDNotIn(ids ...int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNotIn(FieldID, ids...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDGT applies the GT predicate on the ID field.
|
|
||||||
func IDGT(id int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldGT(FieldID, id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDGTE applies the GTE predicate on the ID field.
|
|
||||||
func IDGTE(id int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldGTE(FieldID, id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDLT applies the LT predicate on the ID field.
|
|
||||||
func IDLT(id int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldLT(FieldID, id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDLTE applies the LTE predicate on the ID field.
|
|
||||||
func IDLTE(id int) predicate.User {
|
|
||||||
return predicate.User(sql.FieldLTE(FieldID, id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
|
|
||||||
func Name(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Email applies equality check predicate on the "email" field. It's identical to EmailEQ.
|
|
||||||
func Email(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ.
|
|
||||||
func Password(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldPassword, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verified applies equality check predicate on the "verified" field. It's identical to VerifiedEQ.
|
|
||||||
func Verified(v bool) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldVerified, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
|
||||||
func CreatedAt(v time.Time) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldCreatedAt, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameEQ applies the EQ predicate on the "name" field.
|
|
||||||
func NameEQ(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameNEQ applies the NEQ predicate on the "name" field.
|
|
||||||
func NameNEQ(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNEQ(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameIn applies the In predicate on the "name" field.
|
|
||||||
func NameIn(vs ...string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldIn(FieldName, vs...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameNotIn applies the NotIn predicate on the "name" field.
|
|
||||||
func NameNotIn(vs ...string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNotIn(FieldName, vs...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameGT applies the GT predicate on the "name" field.
|
|
||||||
func NameGT(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldGT(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameGTE applies the GTE predicate on the "name" field.
|
|
||||||
func NameGTE(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldGTE(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameLT applies the LT predicate on the "name" field.
|
|
||||||
func NameLT(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldLT(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameLTE applies the LTE predicate on the "name" field.
|
|
||||||
func NameLTE(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldLTE(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameContains applies the Contains predicate on the "name" field.
|
|
||||||
func NameContains(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldContains(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
|
||||||
func NameHasPrefix(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldHasPrefix(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
|
||||||
func NameHasSuffix(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldHasSuffix(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
|
||||||
func NameEqualFold(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEqualFold(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
|
||||||
func NameContainsFold(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldContainsFold(FieldName, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailEQ applies the EQ predicate on the "email" field.
|
|
||||||
func EmailEQ(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailNEQ applies the NEQ predicate on the "email" field.
|
|
||||||
func EmailNEQ(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNEQ(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailIn applies the In predicate on the "email" field.
|
|
||||||
func EmailIn(vs ...string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldIn(FieldEmail, vs...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailNotIn applies the NotIn predicate on the "email" field.
|
|
||||||
func EmailNotIn(vs ...string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNotIn(FieldEmail, vs...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailGT applies the GT predicate on the "email" field.
|
|
||||||
func EmailGT(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldGT(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailGTE applies the GTE predicate on the "email" field.
|
|
||||||
func EmailGTE(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldGTE(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailLT applies the LT predicate on the "email" field.
|
|
||||||
func EmailLT(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldLT(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailLTE applies the LTE predicate on the "email" field.
|
|
||||||
func EmailLTE(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldLTE(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailContains applies the Contains predicate on the "email" field.
|
|
||||||
func EmailContains(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldContains(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailHasPrefix applies the HasPrefix predicate on the "email" field.
|
|
||||||
func EmailHasPrefix(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldHasPrefix(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailHasSuffix applies the HasSuffix predicate on the "email" field.
|
|
||||||
func EmailHasSuffix(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldHasSuffix(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailEqualFold applies the EqualFold predicate on the "email" field.
|
|
||||||
func EmailEqualFold(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEqualFold(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmailContainsFold applies the ContainsFold predicate on the "email" field.
|
|
||||||
func EmailContainsFold(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldContainsFold(FieldEmail, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PasswordEQ applies the EQ predicate on the "password" field.
|
|
||||||
func PasswordEQ(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldEQ(FieldPassword, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PasswordNEQ applies the NEQ predicate on the "password" field.
|
|
||||||
func PasswordNEQ(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNEQ(FieldPassword, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PasswordIn applies the In predicate on the "password" field.
|
|
||||||
func PasswordIn(vs ...string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldIn(FieldPassword, vs...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PasswordNotIn applies the NotIn predicate on the "password" field.
|
|
||||||
func PasswordNotIn(vs ...string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldNotIn(FieldPassword, vs...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PasswordGT applies the GT predicate on the "password" field.
|
|
||||||
func PasswordGT(v string) predicate.User {
|
|
||||||
return predicate.User(sql.FieldGT(FieldPassword, v))
|
|
||||||
}
|
|
||||||
|
|
(remaining generated User predicate helpers removed: PasswordGTE/LT/LTE, PasswordContains, PasswordHasPrefix, PasswordHasSuffix, PasswordEqualFold, PasswordContainsFold, VerifiedEQ/NEQ, the CreatedAt EQ/NEQ/In/NotIn/GT/GTE/LT/LTE comparisons, the HasOwner and HasOwnerWith edge predicates, and the And/Or/Not combinators)
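For orientation, the deleted predicates above were consumed by the generated query builder removed further down in this diff. A minimal usage sketch, assuming the generated ent packages are still importable under the pre-rename pagoda module path (the path the deleted files themselves import) and that the caller supplies an opened *ent.Client and context — neither appears in this diff:

package sketches

import (
	"context"
	"time"

	"github.com/mikestefanello/pagoda/ent"
	"github.com/mikestefanello/pagoda/ent/user"
)

// recentVerifiedUsers combines the generated field predicates inside a query:
// verified accounts created within the last 30 days.
func recentVerifiedUsers(ctx context.Context, client *ent.Client) ([]*ent.User, error) {
	return client.User.Query().
		Where(
			user.VerifiedEQ(true),
			user.CreatedAtGTE(time.Now().AddDate(0, 0, -30)),
		).
		All(ctx)
}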
@@ -1,319 +0,0 @@
(generated User create builder removed: the UserCreate and UserCreateBulk types, their field setters, default and validation checks, and the sqlgraph create specs)
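The removed create builder was driven through the generated client. A short sketch of the call shape it supported, with the client, context, and the pre-hashed password value standing in as illustrative assumptions:

package sketches

import (
	"context"

	"github.com/mikestefanello/pagoda/ent"
)

// createUser walks the deleted UserCreate chain. Save applies the generated
// defaults (verified, created_at) and runs the name/email/password validators
// before inserting the row.
func createUser(ctx context.Context, client *ent.Client) (*ent.User, error) {
	return client.User.Create().
		SetName("Jane Doe").
		SetEmail("jane@example.com").
		SetPassword("already-hashed-password").
		SetVerified(false).
		Save(ctx)
}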
@@ -1,88 +0,0 @@
(generated User delete builder removed: the UserDelete and UserDeleteOne types and their sqlgraph delete execution)
@@ -1,606 +0,0 @@
(generated User query builder removed: the UserQuery type with its First/Only/All/IDs/Count/Exist helpers and owner-edge eager loading, plus the UserGroupBy and UserSelect builders)
@@ -1,520 +0,0 @@
(generated User update builder removed: the UserUpdate and UserUpdateOne types with their field setters, owner-edge add/remove/clear handling, validators, and sqlgraph update specs)
71
go.mod
71
go.mod
@ -1,4 +1,4 @@
|
|||||||
module github.com/mikestefanello/pagoda
|
module git.grosinger.net/tgrosinger/saasitone
|
||||||
|
|
||||||
go 1.22
|
go 1.22
|
||||||
|
|
||||||
@ -7,92 +7,83 @@ toolchain go1.22.1
|
|||||||
require (
|
require (
|
||||||
entgo.io/ent v0.13.1
|
entgo.io/ent v0.13.1
|
||||||
github.com/Masterminds/sprig v2.22.0+incompatible
|
github.com/Masterminds/sprig v2.22.0+incompatible
|
||||||
github.com/PuerkitoBio/goquery v1.9.1
|
github.com/PuerkitoBio/goquery v1.9.2
|
||||||
github.com/eko/gocache/lib/v4 v4.1.6
|
github.com/go-playground/validator/v10 v10.22.0
|
||||||
github.com/eko/gocache/store/redis/v4 v4.2.1
|
|
||||||
github.com/go-playground/validator/v10 v10.19.0
|
|
||||||
github.com/golang-jwt/jwt v3.2.2+incompatible
|
github.com/golang-jwt/jwt v3.2.2+incompatible
|
||||||
github.com/gorilla/context v1.1.2
|
github.com/gorilla/context v1.1.2
|
||||||
github.com/gorilla/sessions v1.2.2
|
github.com/gorilla/sessions v1.3.0
|
||||||
github.com/hibiken/asynq v0.24.1
|
|
||||||
github.com/jackc/pgx/v4 v4.18.3
|
|
||||||
github.com/labstack/echo/v4 v4.12.0
|
github.com/labstack/echo/v4 v4.12.0
|
||||||
github.com/labstack/gommon v0.4.2
|
github.com/labstack/gommon v0.4.2
|
||||||
github.com/redis/go-redis/v9 v9.5.1
|
github.com/maragudk/goqite v0.2.3
|
||||||
github.com/spf13/viper v1.18.2
|
github.com/mattn/go-sqlite3 v1.14.22
|
||||||
|
github.com/maypok86/otter v1.2.1
|
||||||
|
github.com/spf13/viper v1.19.0
|
||||||
github.com/stretchr/testify v1.9.0
|
github.com/stretchr/testify v1.9.0
|
||||||
golang.org/x/crypto v0.22.0
|
golang.org/x/crypto v0.25.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
ariga.io/atlas v0.21.1 // indirect
|
ariga.io/atlas v0.21.1 // indirect
|
||||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||||
github.com/Masterminds/semver v1.5.0 // indirect
|
github.com/Masterminds/semver v1.5.0 // indirect
|
||||||
|
github.com/a-h/templ v0.2.747 // indirect
|
||||||
github.com/agext/levenshtein v1.2.3 // indirect
|
github.com/agext/levenshtein v1.2.3 // indirect
|
||||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
github.com/dolthub/maphash v0.1.0 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.4 // indirect
|
||||||
|
github.com/gammazero/deque v0.2.1 // indirect
|
||||||
github.com/go-openapi/inflect v0.21.0 // indirect
|
github.com/go-openapi/inflect v0.21.0 // indirect
|
||||||
github.com/go-playground/locales v0.14.1 // indirect
|
github.com/go-playground/locales v0.14.1 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||||
github.com/golang/mock v1.6.0 // indirect
|
github.com/golang-migrate/migrate/v4 v4.17.1 // indirect
|
||||||
github.com/golang/protobuf v1.5.4 // indirect
|
|
||||||
github.com/google/go-cmp v0.6.0 // indirect
|
github.com/google/go-cmp v0.6.0 // indirect
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
github.com/hashicorp/hcl/v2 v2.20.1 // indirect
|
github.com/hashicorp/hcl/v2 v2.20.1 // indirect
|
||||||
github.com/huandu/xstrings v1.4.0 // indirect
|
github.com/huandu/xstrings v1.4.0 // indirect
|
||||||
github.com/imdario/mergo v0.3.16 // indirect
|
github.com/imdario/mergo v0.3.16 // indirect
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/jackc/pgconn v1.14.3 // indirect
|
|
||||||
github.com/jackc/pgio v1.0.0 // indirect
|
|
||||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
|
|
||||||
github.com/jackc/pgtype v1.14.3 // indirect
|
|
||||||
github.com/leodido/go-urn v1.4.0 // indirect
|
github.com/leodido/go-urn v1.4.0 // indirect
|
||||||
github.com/magiconair/properties v1.8.7 // indirect
|
github.com/magiconair/properties v1.8.7 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
|
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||||
github.com/pelletier/go-toml/v2 v2.2.1 // indirect
|
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||||
|
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/prometheus/client_golang v1.19.0 // indirect
|
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||||
github.com/prometheus/client_model v0.6.1 // indirect
|
github.com/sagikazarmark/locafero v0.6.0 // indirect
|
||||||
github.com/prometheus/common v0.53.0 // indirect
|
|
||||||
github.com/prometheus/procfs v0.14.0 // indirect
|
|
||||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
|
||||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
|
||||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||||
github.com/spf13/afero v1.11.0 // indirect
|
github.com/spf13/afero v1.11.0 // indirect
|
||||||
github.com/spf13/cast v1.6.0 // indirect
|
github.com/spf13/cast v1.6.0 // indirect
|
||||||
|
github.com/spf13/cobra v1.7.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/subosito/gotenv v1.6.0 // indirect
|
github.com/subosito/gotenv v1.6.0 // indirect
|
||||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
github.com/valyala/fasttemplate v1.2.2 // indirect
|
||||||
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
|
|
||||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
|
||||||
github.com/zclconf/go-cty v1.14.4 // indirect
|
github.com/zclconf/go-cty v1.14.4 // indirect
|
||||||
|
go.uber.org/atomic v1.9.0 // indirect
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect
|
golang.org/x/exp v0.0.0-20240707233637-46b078467d37 // indirect
|
||||||
golang.org/x/mod v0.17.0 // indirect
|
golang.org/x/mod v0.19.0 // indirect
|
||||||
golang.org/x/net v0.24.0 // indirect
|
golang.org/x/net v0.27.0 // indirect
|
||||||
golang.org/x/sync v0.7.0 // indirect
|
golang.org/x/sync v0.7.0 // indirect
|
||||||
golang.org/x/sys v0.19.0 // indirect
|
golang.org/x/sys v0.22.0 // indirect
|
||||||
golang.org/x/text v0.14.0 // indirect
|
golang.org/x/text v0.16.0 // indirect
|
||||||
golang.org/x/time v0.5.0 // indirect
|
golang.org/x/time v0.5.0 // indirect
|
||||||
golang.org/x/tools v0.20.0 // indirect
|
golang.org/x/tools v0.23.0 // indirect
|
||||||
google.golang.org/protobuf v1.33.0 // indirect
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
292
go.sum
292
go.sum
@ -2,59 +2,43 @@ ariga.io/atlas v0.21.1 h1:Eg9XYhKTH3UHoqP7tKMWFV+Z5JnpVOJCgO3MHrUtKmk=
|
|||||||
ariga.io/atlas v0.21.1/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE=
|
ariga.io/atlas v0.21.1/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE=
|
||||||
entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=
|
entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=
|
||||||
entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A=
|
entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||||
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||||
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
||||||
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||||
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
|
||||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
|
||||||
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
|
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
|
||||||
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
|
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
|
||||||
github.com/PuerkitoBio/goquery v1.9.1 h1:mTL6XjbJTZdpfL+Gwl5U2h1l9yEkJjhmlTeV9VPW7UI=
|
github.com/PuerkitoBio/goquery v1.9.1 h1:mTL6XjbJTZdpfL+Gwl5U2h1l9yEkJjhmlTeV9VPW7UI=
|
||||||
github.com/PuerkitoBio/goquery v1.9.1/go.mod h1:cW1n6TmIMDoORQU5IU/P1T3tGFunOeXEpGP2WHRwkbY=
|
github.com/PuerkitoBio/goquery v1.9.1/go.mod h1:cW1n6TmIMDoORQU5IU/P1T3tGFunOeXEpGP2WHRwkbY=
|
||||||
|
github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE=
|
||||||
|
github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk=
|
||||||
|
github.com/a-h/templ v0.2.747 h1:D0dQ2lxC3W7Dxl6fxQ/1zZHBQslSkTSvl5FxP/CfdKg=
|
||||||
|
github.com/a-h/templ v0.2.747/go.mod h1:69ObQIbrcuwPCU32ohNaWce3Cb7qM5GMiqN1K+2yop4=
|
||||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||||
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
||||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
||||||
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
|
||||||
github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w=
|
|
||||||
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
|
||||||
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
|
|
||||||
github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
|
|
||||||
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
|
|
||||||
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
|
|
||||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
|
||||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
|
||||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
|
||||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
|
||||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
|
||||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
|
||||||
github.com/eko/gocache/lib/v4 v4.1.6 h1:5WWIGISKhE7mfkyF+SJyWwqa4Dp2mkdX8QsZpnENqJI=
|
|
||||||
github.com/eko/gocache/lib/v4 v4.1.6/go.mod h1:HFxC8IiG2WeRotg09xEnPD72sCheJiTSr4Li5Ameg7g=
|
|
||||||
github.com/eko/gocache/store/redis/v4 v4.2.1 h1:uPAgZIn7knH6a55tO4ETN9V93VD3Rcyx0ZIyozEqC0I=
|
|
||||||
github.com/eko/gocache/store/redis/v4 v4.2.1/go.mod h1:JoLkNA5yeGNQUwINAM9529cDNQCo88WwiKlO9e/+39I=
|
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
|
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
|
||||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I=
|
||||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s=
|
||||||
|
github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0=
|
||||||
|
github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU=
|
||||||
github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk=
|
github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk=
|
||||||
github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
|
github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
|
||||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||||
@ -65,27 +49,18 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
|
|||||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||||
github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
|
github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
|
||||||
github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
|
||||||
|
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
||||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
|
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
|
||||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||||
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||||
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4=
|
||||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
|
||||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
|
||||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
|
||||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/gorilla/context v1.1.2 h1:WRkNAv2uoa03QNIc1A6u4O7DAGMUVoopZhkiXWA2V1o=
|
github.com/gorilla/context v1.1.2 h1:WRkNAv2uoa03QNIc1A6u4O7DAGMUVoopZhkiXWA2V1o=
|
||||||
@ -94,75 +69,27 @@ github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kX
|
|||||||
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
|
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
|
||||||
github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY=
|
github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY=
|
||||||
github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ=
|
github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ=
|
||||||
|
github.com/gorilla/sessions v1.3.0 h1:XYlkq7KcpOB2ZhHBPv5WpjMIxrQosiZanfoy1HLZFzg=
|
||||||
|
github.com/gorilla/sessions v1.3.0/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ=
|
||||||
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||||
|
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
|
github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
|
||||||
github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
|
github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
|
||||||
github.com/hibiken/asynq v0.24.1 h1:+5iIEAyA9K/lcSPvx3qoPtsKJeKI5u9aOIvUmSsazEw=
|
|
||||||
github.com/hibiken/asynq v0.24.1/go.mod h1:u5qVeSbrnfT+vtG5Mq8ZPzQu/BmCKMHvTGb91uy9Tts=
|
|
||||||
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
|
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
|
||||||
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
|
||||||
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
|
|
||||||
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
|
|
||||||
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
|
|
||||||
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
|
||||||
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
|
||||||
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
|
||||||
github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
|
|
||||||
github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
|
|
||||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
|
||||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
|
|
||||||
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
|
|
||||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
|
||||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
|
||||||
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
|
|
||||||
github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
|
|
||||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
|
||||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
|
||||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
|
||||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
|
||||||
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
|
||||||
github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
|
||||||
github.com/jackc/pgtype v1.14.3 h1:h6W9cPuHsRWQFTWUZMAKMgG5jSwQI0Zurzdvlx3Plus=
|
|
||||||
github.com/jackc/pgtype v1.14.3/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
|
|
||||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
|
||||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
|
||||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
|
||||||
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
|
||||||
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
|
||||||
github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
|
|
||||||
github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
|
|
||||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
@ -172,25 +99,23 @@ github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0
|
|||||||
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
|
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
|
||||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
|
||||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
|
||||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
|
||||||
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
|
|
||||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
|
||||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
github.com/maragudk/goqite v0.2.3 h1:R8oVD6IMCQfjhCKyGIYwWxR1w8yxjvT/3uwYtA656jE=
|
||||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
github.com/maragudk/goqite v0.2.3/go.mod h1:5430TCLkycUeLE314c9fifTrTbwcJqJXdU3iyEiF6hM=
|
||||||
|
github.com/maragudk/is v0.1.0 h1:obq9anZNmOYcaNbeT0LMyjIexdNeYTw/TLAPD/BnZHA=
|
||||||
|
github.com/maragudk/is v0.1.0/go.mod h1:W/r6+TpnISu+a88OLXQy5JQGCOhXQXXLD2e5b4xMn5c=
|
||||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
|
||||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
|
||||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
|
||||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||||
|
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||||
|
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||||
|
github.com/maypok86/otter v1.2.1 h1:xyvMW+t0vE1sKt/++GTkznLitEl7D/msqXkAbLwiC1M=
|
||||||
|
github.com/maypok86/otter v1.2.1/go.mod h1:mKLfoI7v1HOmQMwFgX4QkRk23mX6ge3RDvjdHOWG4R4=
|
||||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||||
@ -199,68 +124,45 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
|
|||||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||||
|
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||||
|
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.1 h1:9TA9+T8+8CUCO2+WYnDLCgrYi9+omqKXyjDtosvtEhg=
|
github.com/pelletier/go-toml/v2 v2.2.1 h1:9TA9+T8+8CUCO2+WYnDLCgrYi9+omqKXyjDtosvtEhg=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.1/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
github.com/pelletier/go-toml/v2 v2.2.1/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
|
||||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
|
||||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
|
||||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
|
||||||
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
|
|
||||||
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
|
|
||||||
github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s=
|
|
||||||
github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ=
|
|
||||||
github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
|
|
||||||
github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
|
|
||||||
github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
|
|
||||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
|
||||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
|
||||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||||
|
github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
|
||||||
|
github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
|
||||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
|
||||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
|
||||||
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
|
||||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
|
||||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
|
||||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
|
||||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||||
|
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||||
|
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
|
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
|
||||||
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
|
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
|
||||||
|
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
|
||||||
|
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
@ -270,156 +172,88 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw
|
|||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||||
 github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
 github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
-github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
-github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
-github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
-github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
 github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
 github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI=
 github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
-github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
 golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
 golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
 golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
 golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
 golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
 golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
 golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
-golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
37 pkg/db/time.go Normal file
@ -0,0 +1,37 @@
package db

import (
    "database/sql/driver"
    "errors"
    "time"
)

const timeFormat = "2006-01-02 15:04:05"

type Time struct {
    time.Time
}

func (t *Time) Scan(value any) error {
    valueStr := ""
    switch s := value.(type) {
    case string:
        valueStr = s
    case []byte:
        valueStr = string(s)
    default:
        return errors.New("Cannot scan value into Time")
    }

    parsedTime, err := time.Parse(timeFormat, valueStr)
    if err != nil {
        return err
    }

    t.Time = parsedTime
    return nil
}

func (t Time) Value() (driver.Value, error) {
    return t.Format(timeFormat), nil
}
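Time implements both sql.Scanner and driver.Valuer, so sqlc-generated models and plain database/sql code can read and write it directly. A minimal standalone sketch of the round trip (no database needed; the import path assumes the module layout used by the other saasitone imports in this diff):

```go
package main

import (
    "fmt"

    "git.grosinger.net/tgrosinger/saasitone/pkg/db"
)

func main() {
    var t db.Time

    // Scan accepts the TEXT form SQLite hands back for a timestamp column.
    if err := t.Scan([]byte("2024-07-01 12:30:00")); err != nil {
        panic(err)
    }
    fmt.Println(t.Year()) // 2024, via the embedded time.Time

    // Value re-encodes the same "2006-01-02 15:04:05" layout for writes.
    v, _ := t.Value()
    fmt.Println(v) // 2024-07-01 12:30:00
}
```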
@ -2,7 +2,8 @@ package form

 import (
     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/context"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/context"
 )

 // Form represents a form that can be submitted and validated
@ -4,10 +4,11 @@ import (
     "testing"

     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/context"
-    "github.com/mikestefanello/pagoda/pkg/tests"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/context"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )

 type mockForm struct {
@ -5,9 +5,9 @@ import (
     "net/http"

     "github.com/go-playground/validator/v10"
-    "github.com/mikestefanello/pagoda/pkg/context"
-
     "github.com/labstack/echo/v4"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/context"
 )

 // Submission represents the state of the submission of a form, not including the form itself.
@ -103,6 +103,8 @@ func (f *Submission) setErrorMessages(err error) {
             message = "Enter a valid email address."
         case "eqfield":
             message = "Does not match."
+        case "gte":
+            message = fmt.Sprintf("Must be greater than or equal to %v.", ve.Param())
         default:
             message = "Invalid value."
         }
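With the new "gte" branch, any form field tagged with a minimum-value rule now gets a specific message instead of the generic fallback. A hypothetical form struct that would exercise it, submitted with a negative delay:

```go
package form_test

import "git.grosinger.net/tgrosinger/saasitone/pkg/form"

// exampleForm is hypothetical; its Delay rule hits the new "gte" case,
// so a negative value surfaces "Must be greater than or equal to 0."
type exampleForm struct {
    Delay int `form:"delay" validate:"gte=0"`
    form.Submission
}
```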
@ -8,9 +8,10 @@ import (

     "github.com/go-playground/validator/v10"
     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/services"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
 )

 func TestFormSubmission(t *testing.T) {
@ -9,13 +9,12 @@ import (
     "github.com/Masterminds/sprig"
     "github.com/labstack/echo/v4"
     "github.com/labstack/gommon/random"
-    "github.com/mikestefanello/pagoda/config"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
 )

-var (
     // CacheBuster stores a random string used as a cache buster for static files.
-    CacheBuster = random.String(10)
-)
+var CacheBuster = random.String(10)

 type funcMap struct {
     web *echo.Echo
@ -54,6 +53,10 @@ func (fm *funcMap) file(filepath string) string {
     return fmt.Sprintf("/%s/%s?v=%s", config.StaticPrefix, filepath, CacheBuster)
 }

+func File(filepath string) string {
+    return fmt.Sprintf("/%s/%s?v=%s", config.StaticPrefix, filepath, CacheBuster)
+}
+
 // link outputs HTML for a link element, providing the ability to dynamically set the active class
 func (fm *funcMap) link(url, text, currentPath string, classes ...string) template.HTML {
     if currentPath == url {
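The exported File mirrors the unexported funcMap method so cache-busted asset URLs can be built outside the template func map, e.g. from templ components. A sketch of a call site (the pkg/funcmap package path and the asset name are assumptions, not taken from this diff):

```go
package layouts // hypothetical call site

import "git.grosinger.net/tgrosinger/saasitone/pkg/funcmap"

// stylesheetHref builds a cache-busted static URL such as
// "/files/main.css?v=<CacheBuster>", using the same config.StaticPrefix as File.
func stylesheetHref() string {
    return funcmap.File("main.css")
}
```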
@ -5,9 +5,9 @@ import (
     "testing"

     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/config"
-
     "github.com/stretchr/testify/assert"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
 )

 func TestNewFuncMap(t *testing.T) {
@ -1,22 +1,23 @@
 package handlers

 import (
+    "database/sql"
     "fmt"
     "strings"

     "github.com/go-playground/validator/v10"
     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/ent"
-    "github.com/mikestefanello/pagoda/ent/user"
-    "github.com/mikestefanello/pagoda/pkg/context"
-    "github.com/mikestefanello/pagoda/pkg/form"
-    "github.com/mikestefanello/pagoda/pkg/log"
-    "github.com/mikestefanello/pagoda/pkg/middleware"
-    "github.com/mikestefanello/pagoda/pkg/msg"
-    "github.com/mikestefanello/pagoda/pkg/page"
-    "github.com/mikestefanello/pagoda/pkg/redirect"
-    "github.com/mikestefanello/pagoda/pkg/services"
-    "github.com/mikestefanello/pagoda/templates"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/context"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/form"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/log"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/middleware"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/msg"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/redirect"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
+    "git.grosinger.net/tgrosinger/saasitone/templates"
 )

 const (
@ -36,7 +37,7 @@ type (
     Auth struct {
         auth *services.AuthClient
         mail *services.MailClient
-        orm  *ent.Client
+        db   *services.DBClient
         *services.TemplateRenderer
     }

@ -72,7 +73,7 @@ func init() {

 func (h *Auth) Init(c *services.Container) error {
     h.TemplateRenderer = c.TemplateRenderer
-    h.orm = c.ORM
+    h.db = c.DB
     h.auth = c.Auth
     h.mail = c.Mail
     return nil
@ -91,7 +92,7 @@ func (h *Auth) Routes(g *echo.Group) {
     noAuth.POST("/password", h.ForgotPasswordSubmit).Name = routeNameForgotPasswordSubmit

     resetGroup := noAuth.Group("/password/reset",
-        middleware.LoadUser(h.orm),
+        middleware.LoadUser(h.db),
         middleware.LoadValidPasswordToken(h.auth),
     )
     resetGroup.GET("/token/:user/:password_token/:token", h.ResetPasswordPage).Name = routeNameResetPassword
@ -128,16 +129,10 @@ func (h *Auth) ForgotPasswordSubmit(ctx echo.Context) error {
     }

     // Attempt to load the user
-    u, err := h.orm.User.
-        Query().
-        Where(user.Email(strings.ToLower(input.Email))).
-        Only(ctx.Request().Context())
-
-    switch err.(type) {
-    case *ent.NotFoundError:
+    u, err := h.db.C.GetUserByEmail(ctx.Request().Context(), strings.ToLower(input.Email))
+    if err == sql.ErrNoRows {
         return succeed()
-    case nil:
-    default:
+    } else if err != nil {
         return fail(err, "error querying user during forgot password")
     }

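With sqlc the "no such user" case is the plain database/sql sentinel rather than an ent error type, so every lookup in this handler now follows the same shape. A hedged sketch of that pattern as a standalone helper, assuming h.db.C is the generated *sqlc.Queries (as sqlc produces by default); errors.Is is used here, which behaves like the == check above but also tolerates wrapped errors:

```go
package handlers

import (
    "context"
    "database/sql"
    "errors"
    "strings"

    "git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
)

// findUserByEmail is a hypothetical helper illustrating the lookup pattern above.
func findUserByEmail(ctx context.Context, q *sqlc.Queries, email string) (sqlc.User, bool, error) {
    u, err := q.GetUserByEmail(ctx, strings.ToLower(email))
    if errors.Is(err, sql.ErrNoRows) {
        return sqlc.User{}, false, nil // unknown email; the caller decides how to respond
    }
    if err != nil {
        return sqlc.User{}, false, err
    }
    return u, true, nil
}
```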
@ -159,7 +154,6 @@ func (h *Auth) ForgotPasswordSubmit(ctx echo.Context) error {
         Subject("Reset your password").
         Body(fmt.Sprintf("Go here to reset your password: %s", url)).
         Send(ctx)
-
     if err != nil {
         return fail(err, "error sending password reset email")
     }
@ -198,16 +192,10 @@ func (h *Auth) LoginSubmit(ctx echo.Context) error {
     }

     // Attempt to load the user
-    u, err := h.orm.User.
-        Query().
-        Where(user.Email(strings.ToLower(input.Email))).
-        Only(ctx.Request().Context())
-
-    switch err.(type) {
-    case *ent.NotFoundError:
+    u, err := h.db.C.GetUserByEmail(ctx.Request().Context(), strings.ToLower(input.Email))
+    if err == sql.ErrNoRows {
         return authFailed()
-    case nil:
-    default:
+    } else if err != nil {
         return fail(err, "error querying user during login")
     }

@ -271,28 +259,27 @@ func (h *Auth) RegisterSubmit(ctx echo.Context) error {
     }

     // Attempt creating the user
-    u, err := h.orm.User.
-        Create().
-        SetName(input.Name).
-        SetEmail(input.Email).
-        SetPassword(pwHash).
-        Save(ctx.Request().Context())
-
-    switch err.(type) {
-    case nil:
-        log.Ctx(ctx).Info("user created",
-            "user_name", u.Name,
-            "user_id", u.ID,
-        )
-    case *ent.ConstraintError:
+    u, err := h.db.C.CreateUser(ctx.Request().Context(), sqlc.CreateUserParams{
+        Name:     input.Name,
+        Email:    input.Email,
+        Password: pwHash,
+    })
+    if err != nil {
+        if strings.HasPrefix(err.Error(), "UNIQUE constraint failed") {
             msg.Warning(ctx, "A user with this email address already exists. Please log in.")
             return redirect.New(ctx).
                 Route(routeNameLogin).
                 Go()
-    default:
+        }
+
         return fail(err, "unable to create user")
     }

+    log.Ctx(ctx).Info("user created",
+        "user_name", u.Name,
+        "user_id", u.ID,
+    )
+
     // Log the user in
     err = h.auth.Login(ctx, u.ID)
     if err != nil {
@ -316,7 +303,7 @@ func (h *Auth) RegisterSubmit(ctx echo.Context) error {
             Go()
 }

-func (h *Auth) sendVerificationEmail(ctx echo.Context, usr *ent.User) {
+func (h *Auth) sendVerificationEmail(ctx echo.Context, usr sqlc.User) {
     // Generate a token
     token, err := h.auth.GenerateEmailVerificationToken(usr.Email)
     if err != nil {
@ -335,7 +322,6 @@ func (h *Auth) sendVerificationEmail(ctx echo.Context, usr sqlc.User) {
         Subject("Confirm your email address").
         Body(fmt.Sprintf("Click here to confirm your email address: %s", url)).
         Send(ctx)
-
     if err != nil {
         log.Ctx(ctx).Error("unable to send email verification link",
             "user_id", usr.ID,
@ -377,14 +363,13 @@ func (h *Auth) ResetPasswordSubmit(ctx echo.Context) error {
     }

     // Get the requesting user
-    usr := ctx.Get(context.UserKey).(*ent.User)
+    usr := ctx.Get(context.UserKey).(*sqlc.User)

     // Update the user
-    _, err = usr.
-        Update().
-        SetPassword(hash).
-        Save(ctx.Request().Context())
-
+    err = h.db.C.UpdateUserPassword(ctx.Request().Context(), sqlc.UpdateUserPasswordParams{
+        Password: hash,
+        ID:       usr.ID,
+    })
     if err != nil {
         return fail(err, "unable to update password")
     }
@ -402,9 +387,6 @@ func (h *Auth) ResetPasswordSubmit(ctx echo.Context) error {
 }

 func (h *Auth) VerifyEmail(ctx echo.Context) error {
-    var usr *ent.User
-
-    // Validate the token
     token := ctx.Param("token")
     email, err := h.auth.ValidateEmailVerificationToken(token)
     if err != nil {
@ -414,38 +396,10 @@ func (h *Auth) VerifyEmail(ctx echo.Context) error {
             Go()
     }

-    // Check if it matches the authenticated user
-    if u := ctx.Get(context.AuthenticatedUserKey); u != nil {
-        authUser := u.(*ent.User)
-
-        if authUser.Email == email {
-            usr = authUser
-        }
-    }
-
-    // Query to find a matching user, if needed
-    if usr == nil {
-        usr, err = h.orm.User.
-            Query().
-            Where(user.Email(email)).
-            Only(ctx.Request().Context())
-
-        if err != nil {
-            return fail(err, "query failed loading email verification token user")
-        }
-    }
-
-    // Verify the user, if needed
-    if !usr.Verified {
-        usr, err = usr.
-            Update().
-            SetVerified(true).
-            Save(ctx.Request().Context())
-
+    err = h.db.C.UpdateUserSetVerified(ctx.Request().Context(), email)
     if err != nil {
         return fail(err, "failed to set user as verified")
     }
-    }

     msg.Success(ctx, "Your email has been successfully verified.")
     return redirect.New(ctx).
93 pkg/handlers/cache.go Normal file
@ -0,0 +1,93 @@
package handlers

import (
    "errors"
    "time"

    "github.com/labstack/echo/v4"

    "git.grosinger.net/tgrosinger/saasitone/pkg/form"
    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
    "git.grosinger.net/tgrosinger/saasitone/templates"
)

const (
    routeNameCache       = "cache"
    routeNameCacheSubmit = "cache.submit"
)

type (
    Cache struct {
        cache *services.CacheClient
        *services.TemplateRenderer
    }

    cacheForm struct {
        Value string `form:"value"`
        form.Submission
    }
)

func init() {
    Register(new(Cache))
}

func (h *Cache) Init(c *services.Container) error {
    h.TemplateRenderer = c.TemplateRenderer
    h.cache = c.Cache
    return nil
}

func (h *Cache) Routes(g *echo.Group) {
    g.GET("/cache", h.Page).Name = routeNameCache
    g.POST("/cache", h.Submit).Name = routeNameCacheSubmit
}

func (h *Cache) Page(ctx echo.Context) error {
    p := page.New(ctx)
    p.Layout = templates.LayoutMain
    p.Name = templates.PageCache
    p.Title = "Set a cache entry"
    p.Form = form.Get[cacheForm](ctx)

    // Fetch the value from the cache
    value, err := h.cache.
        Get().
        Key("page_cache_example").
        Fetch(ctx.Request().Context())

    // Store the value in the page, so it can be rendered, if found
    switch {
    case err == nil:
        p.Data = value.(string)
    case errors.Is(err, services.ErrCacheMiss):
    default:
        return fail(err, "failed to fetch from cache")
    }

    return h.RenderPage(ctx, p)
}

func (h *Cache) Submit(ctx echo.Context) error {
    var input cacheForm

    if err := form.Submit(ctx, &input); err != nil {
        return err
    }

    // Set the cache
    err := h.cache.
        Set().
        Key("page_cache_example").
        Data(input.Value).
        Expiration(30 * time.Minute).
        Save(ctx.Request().Context())
    if err != nil {
        return fail(err, "unable to set cache")
    }

    form.Clear(ctx)

    return h.Page(ctx)
}
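The handler above shows the CacheClient's fluent API end to end. The same calls work outside a request as well; a sketch using only the methods visible in cache.go (the package name and key are placeholders):

```go
package jobs // hypothetical call site

import (
    "context"
    "errors"
    "time"

    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
)

// warmCache stores a value and immediately reads it back through the container's cache client.
func warmCache(c *services.Container) error {
    ctx := context.Background()

    if err := c.Cache.
        Set().
        Key("page_cache_example").
        Data("hello").
        Expiration(30 * time.Minute).
        Save(ctx); err != nil {
        return err
    }

    v, err := c.Cache.
        Get().
        Key("page_cache_example").
        Fetch(ctx)
    if errors.Is(err, services.ErrCacheMiss) {
        return nil // expired or evicted between the two calls
    }
    if err != nil {
        return err
    }

    _ = v.(string) // values come back as any, exactly as in Page above
    return nil
}
```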
@ -5,10 +5,11 @@ import (

     "github.com/go-playground/validator/v10"
     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/form"
-    "github.com/mikestefanello/pagoda/pkg/page"
-    "github.com/mikestefanello/pagoda/pkg/services"
-    "github.com/mikestefanello/pagoda/templates"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/form"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
+    "git.grosinger.net/tgrosinger/saasitone/templates"
 )

 const (
@ -74,7 +75,6 @@ func (h *Contact) Submit(ctx echo.Context) error {
         Subject("Contact form submitted").
         Body(fmt.Sprintf("The message is: %s", input.Message)).
         Send(ctx)
-
     if err != nil {
         return fail(err, "unable to send email")
     }
@ -4,11 +4,12 @@ import (
     "net/http"

     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/context"
-    "github.com/mikestefanello/pagoda/pkg/log"
-    "github.com/mikestefanello/pagoda/pkg/page"
-    "github.com/mikestefanello/pagoda/pkg/services"
-    "github.com/mikestefanello/pagoda/templates"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/context"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/log"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
+    "git.grosinger.net/tgrosinger/saasitone/templates"
 )

 type Error struct {
@ -5,7 +5,8 @@ import (
     "net/http"

     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/services"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
 )

 var handlers []Handler
@ -1,13 +1,14 @@
 package handlers

 import (
-    "fmt"
     "html/template"

     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/page"
-    "github.com/mikestefanello/pagoda/pkg/services"
-    "github.com/mikestefanello/pagoda/templates"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
+    "git.grosinger.net/tgrosinger/saasitone/templ/pages"
+    "git.grosinger.net/tgrosinger/saasitone/templates"
 )

 const (
@ -18,11 +19,7 @@ const (
 type (
     Pages struct {
         *services.TemplateRenderer
-    }
-
-    post struct {
-        Title string
-        Body  string
+        *services.DBClient
     }

     aboutData struct {
@ -43,6 +40,7 @@ func init() {

 func (h *Pages) Init(c *services.Container) error {
     h.TemplateRenderer = c.TemplateRenderer
+    h.DBClient = c.DB
     return nil
 }

@ -58,23 +56,11 @@ func (h *Pages) Home(ctx echo.Context) error {
     p.Metatags.Description = "Welcome to the homepage."
     p.Metatags.Keywords = []string{"Go", "MVC", "Web", "Software"}
     p.Pager = page.NewPager(ctx, 4)
-    p.Data = h.fetchPosts(&p.Pager)
-
-    return h.RenderPage(ctx, p)
-}
-
-// fetchPosts is an mock example of fetching posts to illustrate how paging works
-func (h *Pages) fetchPosts(pager *page.Pager) []post {
-    pager.SetItems(20)
-    posts := make([]post, 20)
-
-    for k := range posts {
-        posts[k] = post{
-            Title: fmt.Sprintf("Post example #%d", k+1),
-            Body:  fmt.Sprintf("Lorem ipsum example #%d ddolor sit amet, consectetur adipiscing elit. Nam elementum vulputate tristique.", k+1),
-        }
-    }
-    return posts[pager.GetOffset() : pager.GetOffset()+pager.ItemsPerPage]
+
+    data := h.Post.FetchAll(&p.Pager)
+    component := pages.Home(p, data)
+
+    return h.RenderPageTempl(ctx, p, component)
 }

 func (h *Pages) About(ctx echo.Context) error {
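Home now renders a templ component through RenderPageTempl instead of an html/template page, with posts coming from the repository on the embedded DBClient. A sketch of that render path written as a hypothetical extra function in the handlers package, reusing only names that appear in this diff:

```go
package handlers

import (
    "github.com/labstack/echo/v4"

    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
    "git.grosinger.net/tgrosinger/saasitone/templ/pages"
    "git.grosinger.net/tgrosinger/saasitone/templates"
)

// renderHomeTempl sketches the new render path: build page state, fetch data
// through the embedded DBClient, then hand the generated templ component to
// the shared renderer.
func renderHomeTempl(h *Pages, ctx echo.Context) error {
    p := page.New(ctx)
    p.Layout = templates.LayoutMain
    p.Pager = page.NewPager(ctx, 4)

    data := h.Post.FetchAll(&p.Pager)
    component := pages.Home(p, data)

    return h.RenderPageTempl(ctx, p, component)
}
```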
@ -5,9 +5,10 @@ import (

     "github.com/gorilla/sessions"
     echomw "github.com/labstack/echo/v4/middleware"
-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/pkg/middleware"
-    "github.com/mikestefanello/pagoda/pkg/services"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/middleware"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
 )

 // BuildRouter builds the router
@ -8,13 +8,12 @@ import (
     "os"
     "testing"

-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/pkg/services"
-
     "github.com/PuerkitoBio/goquery"
     "github.com/stretchr/testify/assert"
-
     "github.com/stretchr/testify/require"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
 )

 var (
@ -5,9 +5,10 @@ import (
     "math/rand"

     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/pkg/page"
-    "github.com/mikestefanello/pagoda/pkg/services"
-    "github.com/mikestefanello/pagoda/templates"
+
+    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
+    "git.grosinger.net/tgrosinger/saasitone/templates"
 )

 const routeNameSearch = "search"
88 pkg/handlers/task.go Normal file
@ -0,0 +1,88 @@
package handlers

import (
    "fmt"
    "time"

    "github.com/go-playground/validator/v10"
    "github.com/labstack/echo/v4"

    "git.grosinger.net/tgrosinger/saasitone/pkg/form"
    "git.grosinger.net/tgrosinger/saasitone/pkg/msg"
    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
    "git.grosinger.net/tgrosinger/saasitone/pkg/tasks"
    "git.grosinger.net/tgrosinger/saasitone/templates"
)

const (
    routeNameTask       = "task"
    routeNameTaskSubmit = "task.submit"
)

type (
    Task struct {
        tasks *services.TaskClient
        *services.TemplateRenderer
    }

    taskForm struct {
        Delay   int    `form:"delay" validate:"gte=0"`
        Message string `form:"message" validate:"required"`
        form.Submission
    }
)

func init() {
    Register(new(Task))
}

func (h *Task) Init(c *services.Container) error {
    h.TemplateRenderer = c.TemplateRenderer
    h.tasks = c.Tasks
    return nil
}

func (h *Task) Routes(g *echo.Group) {
    g.GET("/task", h.Page).Name = routeNameTask
    g.POST("/task", h.Submit).Name = routeNameTaskSubmit
}

func (h *Task) Page(ctx echo.Context) error {
    p := page.New(ctx)
    p.Layout = templates.LayoutMain
    p.Name = templates.PageTask
    p.Title = "Create a task"
    p.Form = form.Get[taskForm](ctx)

    return h.RenderPage(ctx, p)
}

func (h *Task) Submit(ctx echo.Context) error {
    var input taskForm

    err := form.Submit(ctx, &input)

    switch err.(type) {
    case nil:
    case validator.ValidationErrors:
        return h.Page(ctx)
    default:
        return err
    }

    // Insert the task
    err = h.tasks.New(tasks.ExampleTask{
        Message: input.Message,
    }).
        Wait(time.Duration(input.Delay) * time.Second).
        Save()
    if err != nil {
        return fail(err, "unable to create a task")
    }

    msg.Success(ctx, fmt.Sprintf("The task has been created. Check the logs in %d seconds.", input.Delay))
    form.Clear(ctx)

    return h.Page(ctx)
}
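Submit queues an ExampleTask through the TaskClient's fluent builder. The same builder can be used outside an HTTP handler; a sketch using only the calls visible above (the package name is a placeholder):

```go
package jobs // hypothetical call site

import (
    "time"

    "git.grosinger.net/tgrosinger/saasitone/pkg/services"
    "git.grosinger.net/tgrosinger/saasitone/pkg/tasks"
)

// queueReminder enqueues the same ExampleTask used by Submit above,
// delayed by ten seconds, via the container's task client.
func queueReminder(c *services.Container) error {
    return c.Tasks.New(tasks.ExampleTask{
        Message: "check the logs",
    }).
        Wait(10 * time.Second).
        Save()
}
```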
@ -6,19 +6,25 @@ import (
     "github.com/labstack/echo/v4"
 )

-// Headers (https://htmx.org/docs/#requests)
+// Request headers: https://htmx.org/docs/#request-headers
 const (
-    HeaderRequest            = "HX-Request"
     HeaderBoosted            = "HX-Boosted"
+    HeaderHistoryRestoreRequest = "HX-History-Restore-Request"
+    HeaderPrompt                = "HX-Prompt"
+    HeaderRequest               = "HX-Request"
+    HeaderTarget                = "HX-Target"
     HeaderTrigger            = "HX-Trigger"
     HeaderTriggerName        = "HX-Trigger-Name"
-    HeaderTriggerAfterSwap   = "HX-Trigger-After-Swap"
-    HeaderTriggerAfterSettle = "HX-Trigger-After-Settle"
-    HeaderTarget             = "HX-Target"
-    HeaderPrompt             = "HX-Prompt"
-    HeaderPush               = "HX-Push"
+)
+
+// Response headers: https://htmx.org/docs/#response-headers
+const (
+    HeaderPushURL            = "HX-Push-Url"
     HeaderRedirect           = "HX-Redirect"
+    HeaderReplaceURL         = "HX-Replace-Url"
     HeaderRefresh            = "HX-Refresh"
+    HeaderTriggerAfterSettle = "HX-Trigger-After-Settle"
+    HeaderTriggerAfterSwap   = "HX-Trigger-After-Swap"
 )

 type (
@ -26,6 +32,7 @@ type (
     Request struct {
         Enabled     bool
         Boosted     bool
+        HistoryRestore bool
         Trigger     string
         TriggerName string
         Target      string
@ -34,9 +41,10 @@ type (

     // Response contain data that the server can communicate back to HTMX
     Response struct {
-        Push               string
+        PushURL            string
         Redirect           string
         Refresh            bool
+        ReplaceURL         string
         Trigger            string
         TriggerAfterSwap   string
         TriggerAfterSettle string
@ -53,13 +61,14 @@ func GetRequest(ctx echo.Context) Request {
         TriggerName: ctx.Request().Header.Get(HeaderTriggerName),
         Target:      ctx.Request().Header.Get(HeaderTarget),
         Prompt:      ctx.Request().Header.Get(HeaderPrompt),
+        HistoryRestore: ctx.Request().Header.Get(HeaderHistoryRestoreRequest) == "true",
     }
 }

 // Apply applies data from a Response to a server response
 func (r Response) Apply(ctx echo.Context) {
-    if r.Push != "" {
-        ctx.Response().Header().Set(HeaderPush, r.Push)
+    if r.PushURL != "" {
+        ctx.Response().Header().Set(HeaderPushURL, r.PushURL)
     }
     if r.Redirect != "" {
         ctx.Response().Header().Set(HeaderRedirect, r.Redirect)
@ -76,6 +85,9 @@ func (r Response) Apply(ctx echo.Context) {
     if r.TriggerAfterSettle != "" {
         ctx.Response().Header().Set(HeaderTriggerAfterSettle, r.TriggerAfterSettle)
     }
+    if r.ReplaceURL != "" {
+        ctx.Response().Header().Set(HeaderReplaceURL, r.ReplaceURL)
+    }
     if r.NoContent {
         ctx.Response().Status = http.StatusNoContent
     }
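With the renamed and added fields, a handler can drive client-side navigation by filling in Response and calling Apply. A sketch of a hypothetical call site (the pkg/htmx import path, package name, and route are assumptions):

```go
package web // hypothetical call site

import (
    "net/http"

    "github.com/labstack/echo/v4"

    "git.grosinger.net/tgrosinger/saasitone/pkg/htmx"
)

// saveProfile answers an HTMX form post by updating the browser URL and
// firing a client-side event instead of issuing a full redirect.
func saveProfile(ctx echo.Context) error {
    if htmx.GetRequest(ctx).Enabled {
        htmx.Response{
            PushURL: "/settings/profile", // sent as HX-Push-Url
            Trigger: "profile-saved",     // sent as HX-Trigger
        }.Apply(ctx)
    }
    return ctx.NoContent(http.StatusOK)
}
```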
@@ -4,11 +4,10 @@ import (
 	"net/http"
 	"testing"
 
-	"github.com/mikestefanello/pagoda/pkg/tests"
+	"github.com/labstack/echo/v4"
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/labstack/echo/v4"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 func TestSetRequest(t *testing.T) {
@@ -19,10 +18,12 @@ func TestSetRequest(t *testing.T) {
 	ctx.Request().Header.Set(HeaderTriggerName, "b")
 	ctx.Request().Header.Set(HeaderTarget, "c")
 	ctx.Request().Header.Set(HeaderPrompt, "d")
+	ctx.Request().Header.Set(HeaderHistoryRestoreRequest, "true")
 
 	r := GetRequest(ctx)
 	assert.Equal(t, true, r.Enabled)
 	assert.Equal(t, true, r.Boosted)
+	assert.Equal(t, true, r.HistoryRestore)
 	assert.Equal(t, "a", r.Trigger)
 	assert.Equal(t, "b", r.TriggerName)
 	assert.Equal(t, "c", r.Target)
@@ -32,8 +33,9 @@ func TestSetRequest(t *testing.T) {
 func TestResponse_Apply(t *testing.T) {
 	ctx, _ := tests.NewContext(echo.New(), "/")
 	r := Response{
-		Push:             "a",
+		PushURL:          "a",
 		Redirect:         "b",
+		ReplaceURL:       "f",
 		Refresh:          true,
 		Trigger:          "c",
 		TriggerAfterSwap: "d",
@@ -42,11 +44,12 @@ func TestResponse_Apply(t *testing.T) {
 	}
 	r.Apply(ctx)
 
-	assert.Equal(t, "a", ctx.Response().Header().Get(HeaderPush))
+	assert.Equal(t, "a", ctx.Response().Header().Get(HeaderPushURL))
 	assert.Equal(t, "b", ctx.Response().Header().Get(HeaderRedirect))
 	assert.Equal(t, "true", ctx.Response().Header().Get(HeaderRefresh))
 	assert.Equal(t, "c", ctx.Response().Header().Get(HeaderTrigger))
 	assert.Equal(t, "d", ctx.Response().Header().Get(HeaderTriggerAfterSwap))
 	assert.Equal(t, "e", ctx.Response().Header().Get(HeaderTriggerAfterSettle))
+	assert.Equal(t, "f", ctx.Response().Header().Get(HeaderReplaceURL))
 	assert.Equal(t, http.StatusNoContent, ctx.Response().Status)
 }
@@ -4,7 +4,8 @@ import (
 	"log/slog"
 
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/context"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
 )
 
 // Set sets a logger in the context
@@ -4,8 +4,9 @@ import (
 	"testing"
 
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/tests"
 	"github.com/stretchr/testify/assert"
 
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 func TestCtxSet(t *testing.T) {
@@ -1,17 +1,17 @@
 package middleware
 
 import (
+	"errors"
 	"fmt"
 	"net/http"
 	"strconv"
 
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/pkg/context"
-	"github.com/mikestefanello/pagoda/pkg/log"
-	"github.com/mikestefanello/pagoda/pkg/msg"
-	"github.com/mikestefanello/pagoda/pkg/services"
-
 	"github.com/labstack/echo/v4"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/msg"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
 )
 
 // LoadAuthenticatedUser loads the authenticated user, if one, and stores in context
@@ -19,17 +19,13 @@ func LoadAuthenticatedUser(authClient *services.AuthClient) echo.MiddlewareFunc
 	return func(next echo.HandlerFunc) echo.HandlerFunc {
 		return func(c echo.Context) error {
 			u, err := authClient.GetAuthenticatedUser(c)
-			switch err.(type) {
-			case *ent.NotFoundError:
-				log.Ctx(c).Warn("auth user not found")
-			case services.NotAuthenticatedError:
-			case nil:
-				c.Set(context.AuthenticatedUserKey, u)
-			default:
+			if err != nil && !errors.Is(err, services.NotAuthenticatedError{}) {
 				return echo.NewHTTPError(
 					http.StatusInternalServerError,
 					fmt.Sprintf("error querying for authenticated user: %v", err),
 				)
+			} else if u != nil {
+				c.Set(context.AuthenticatedUserKey, u)
 			}
 
 			return next(c)
@@ -48,7 +44,7 @@ func LoadValidPasswordToken(authClient *services.AuthClient) echo.MiddlewareFunc
 			if c.Get(context.UserKey) == nil {
 				return echo.NewHTTPError(http.StatusInternalServerError)
 			}
-			usr := c.Get(context.UserKey).(*ent.User)
+			usr := c.Get(context.UserKey).(*sqlc.User)
 
 			// Extract the token ID
 			tokenID, err := strconv.Atoi(c.Param("password_token"))
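As a rough usage sketch (the repository's router is not part of this diff), the reworked middleware could be wired into Echo as below. The route path, package names, and the "pkg/middleware" import path are assumptions; the Container fields (Web, Auth, DB) mirror the ones used in the tests elsewhere in this change.

package routes // hypothetical package for illustration

import (
	"git.grosinger.net/tgrosinger/saasitone/pkg/middleware" // path assumed
	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
)

// registerRoutes is a sketch of how the middleware might be attached.
func registerRoutes(c *services.Container) {
	// Load the authenticated user (if any) for every request.
	c.Web.Use(middleware.LoadAuthenticatedUser(c.Auth))

	// Password reset routes need the target user and a valid token loaded first;
	// the param names match those exercised in the middleware tests.
	g := c.Web.Group("/password/reset/token/:user/:password_token/:token")
	g.Use(
		middleware.LoadUser(c.DB),
		middleware.LoadValidPasswordToken(c.Auth),
	)
}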
@@ -5,13 +5,12 @@ import (
 	"net/http"
 	"testing"
 
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/pkg/context"
-	"github.com/mikestefanello/pagoda/pkg/tests"
+	"github.com/stretchr/testify/assert"
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/stretchr/testify/assert"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
 )
 
 func TestLoadAuthenticatedUser(t *testing.T) {
@@ -30,7 +29,7 @@ func TestLoadAuthenticatedUser(t *testing.T) {
 	// Verify the midldeware returns the authenticated user
 	_ = tests.ExecuteMiddleware(ctx, mw)
 	require.NotNil(t, ctx.Get(context.AuthenticatedUserKey))
-	ctxUsr, ok := ctx.Get(context.AuthenticatedUserKey).(*ent.User)
+	ctxUsr, ok := ctx.Get(context.AuthenticatedUserKey).(*sqlc.User)
 	require.True(t, ok)
 	assert.Equal(t, usr.ID, ctxUsr.ID)
 }
@@ -82,7 +81,7 @@ func TestLoadValidPasswordToken(t *testing.T) {
 	// Add user and password token context but no token and expect a redirect
 	ctx.SetParamNames("user", "password_token")
 	ctx.SetParamValues(fmt.Sprintf("%d", usr.ID), "1")
-	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.ORM))
+	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.DB))
 	err = tests.ExecuteMiddleware(ctx, LoadValidPasswordToken(c.Auth))
 	assert.NoError(t, err)
 	assert.Equal(t, http.StatusFound, ctx.Response().Status)
@@ -90,7 +89,7 @@ func TestLoadValidPasswordToken(t *testing.T) {
 	// Add user context and invalid password token and expect a redirect
 	ctx.SetParamNames("user", "password_token", "token")
 	ctx.SetParamValues(fmt.Sprintf("%d", usr.ID), "1", "faketoken")
-	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.ORM))
+	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.DB))
 	err = tests.ExecuteMiddleware(ctx, LoadValidPasswordToken(c.Auth))
 	assert.NoError(t, err)
 	assert.Equal(t, http.StatusFound, ctx.Response().Status)
@@ -102,10 +101,10 @@ func TestLoadValidPasswordToken(t *testing.T) {
 	// Add user and valid password token
 	ctx.SetParamNames("user", "password_token", "token")
 	ctx.SetParamValues(fmt.Sprintf("%d", usr.ID), fmt.Sprintf("%d", pt.ID), token)
-	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.ORM))
+	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.DB))
 	err = tests.ExecuteMiddleware(ctx, LoadValidPasswordToken(c.Auth))
 	assert.Nil(t, err)
-	ctxPt, ok := ctx.Get(context.PasswordTokenKey).(*ent.PasswordToken)
+	ctxPt, ok := ctx.Get(context.PasswordTokenKey).(*sqlc.PasswordToken)
 	require.True(t, ok)
 	assert.Equal(t, pt.ID, ctxPt.ID)
 }
@@ -6,12 +6,11 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/mikestefanello/pagoda/pkg/context"
-	"github.com/mikestefanello/pagoda/pkg/log"
-	"github.com/mikestefanello/pagoda/pkg/services"
-
-	libstore "github.com/eko/gocache/lib/v4/store"
 	"github.com/labstack/echo/v4"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/log"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
 )
 
 // ServeCachedPage attempts to load a page from the cache by matching on the complete request URL
@@ -32,10 +31,9 @@ func ServeCachedPage(t *services.TemplateRenderer) echo.MiddlewareFunc {
 
 			// Attempt to load from cache
 			page, err := t.GetCachedPage(ctx, ctx.Request().URL.String())
-
 			if err != nil {
 				switch {
-				case errors.Is(err, &libstore.NotFound{}):
+				case errors.Is(err, services.ErrCacheMiss):
 				case context.IsCanceledError(err):
 					return nil
 				default:
@@ -5,13 +5,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/mikestefanello/pagoda/pkg/page"
-	"github.com/mikestefanello/pagoda/pkg/tests"
-	"github.com/mikestefanello/pagoda/templates"
+	"github.com/stretchr/testify/assert"
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/stretchr/testify/assert"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/page"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
+	"git.grosinger.net/tgrosinger/saasitone/templates"
 )
 
 func TestServeCachedPage(t *testing.T) {
@@ -1,19 +1,19 @@
 package middleware
 
 import (
+	"database/sql"
 	"fmt"
 	"net/http"
 	"strconv"
 
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/ent/user"
-	"github.com/mikestefanello/pagoda/pkg/context"
-
 	"github.com/labstack/echo/v4"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
 )
 
 // LoadUser loads the user based on the ID provided as a path parameter
-func LoadUser(orm *ent.Client) echo.MiddlewareFunc {
+func LoadUser(db *services.DBClient) echo.MiddlewareFunc {
 	return func(next echo.HandlerFunc) echo.HandlerFunc {
 		return func(c echo.Context) error {
 			userID, err := strconv.Atoi(c.Param("user"))
@@ -21,18 +21,16 @@ func LoadUser(orm *ent.Client) echo.MiddlewareFunc {
 				return echo.NewHTTPError(http.StatusNotFound)
 			}
 
-			u, err := orm.User.
-				Query().
-				Where(user.ID(userID)).
-				Only(c.Request().Context())
-
-			switch err.(type) {
-			case nil:
-				c.Set(context.UserKey, u)
+			u, err := db.C.GetUserByID(c.Request().Context(), userID)
+			if err == nil {
+				c.Set(context.UserKey, &u)
 				return next(c)
-			case *ent.NotFoundError:
+			}
+
+			if err == sql.ErrNoRows {
 				return echo.NewHTTPError(http.StatusNotFound)
-			default:
+			}
+
 			return echo.NewHTTPError(
 				http.StatusInternalServerError,
 				fmt.Sprintf("error querying user: %v", err),
@@ -40,4 +38,3 @@ func LoadUser(orm *ent.Client) echo.MiddlewareFunc {
 			}
 		}
 	}
-}
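One small side note on the new code path: the direct comparison err == sql.ErrNoRows only matches an unwrapped sentinel, which is what the sqlc-generated Scan calls elsewhere in this change return, so the check works as written. A wrap-tolerant equivalent, as a sketch only, would use errors.Is:

package middleware // placed alongside the code above, for illustration only

import (
	"database/sql"
	"errors"
	"net/http"

	"github.com/labstack/echo/v4"
)

// notFoundOnNoRows is a sketch of the wrap-tolerant form of the
// err == sql.ErrNoRows comparison used in LoadUser above.
func notFoundOnNoRows(err error) error {
	if errors.Is(err, sql.ErrNoRows) {
		return echo.NewHTTPError(http.StatusNotFound)
	}
	return err
}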
@@ -4,20 +4,20 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/pkg/context"
-	"github.com/mikestefanello/pagoda/pkg/tests"
-
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
 )
 
 func TestLoadUser(t *testing.T) {
 	ctx, _ := tests.NewContext(c.Web, "/")
 	ctx.SetParamNames("user")
 	ctx.SetParamValues(fmt.Sprintf("%d", usr.ID))
-	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.ORM))
-	ctxUsr, ok := ctx.Get(context.UserKey).(*ent.User)
+	_ = tests.ExecuteMiddleware(ctx, LoadUser(c.DB))
+	ctxUsr, ok := ctx.Get(context.UserKey).(*sqlc.User)
 	require.True(t, ok)
 	assert.Equal(t, usr.ID, ctxUsr.ID)
 }
@@ -6,7 +6,8 @@ import (
 	"time"
 
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/log"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/log"
 )
 
 // SetLogger initializes a logger for the current request and stores it in the context.
@@ -7,11 +7,11 @@ import (
 
 	"github.com/labstack/echo/v4"
 	echomw "github.com/labstack/echo/v4/middleware"
-	"github.com/mikestefanello/pagoda/pkg/log"
-	"github.com/mikestefanello/pagoda/pkg/tests"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/stretchr/testify/assert"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/log"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 type mockLogHandler struct {
@@ -4,15 +4,15 @@ import (
 	"os"
 	"testing"
 
-	"github.com/mikestefanello/pagoda/config"
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/pkg/services"
-	"github.com/mikestefanello/pagoda/pkg/tests"
+	"git.grosinger.net/tgrosinger/saasitone/config"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 var (
 	c   *services.Container
-	usr *ent.User
+	usr *sqlc.User
 )
 
 func TestMain(m *testing.M) {
@@ -24,7 +24,7 @@ func TestMain(m *testing.M) {
 
 	// Create a user
 	var err error
-	if usr, err = tests.CreateUser(c.ORM); err != nil {
+	if usr, err = tests.CreateUser(c.DB.C); err != nil {
 		panic(err)
 	}
 
@@ -4,7 +4,8 @@ import (
 	"github.com/gorilla/context"
 	"github.com/gorilla/sessions"
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/session"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/session"
 )
 
 // Session sets the session storage in the request context
@@ -4,10 +4,11 @@ import (
 	"testing"
 
 	"github.com/gorilla/sessions"
-	"github.com/mikestefanello/pagoda/pkg/session"
-	"github.com/mikestefanello/pagoda/pkg/tests"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/session"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 func TestSession(t *testing.T) {
pkg/models/sqlc/db.go (new file)
@@ -0,0 +1,31 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+//   sqlc v1.25.0
+
+package sqlc
+
+import (
+	"context"
+	"database/sql"
+)
+
+type DBTX interface {
+	ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
+	PrepareContext(context.Context, string) (*sql.Stmt, error)
+	QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
+	QueryRowContext(context.Context, string, ...interface{}) *sql.Row
+}
+
+func New(db DBTX) *Queries {
+	return &Queries{db: db}
+}
+
+type Queries struct {
+	db DBTX
+}
+
+func (q *Queries) WithTx(tx *sql.Tx) *Queries {
+	return &Queries{
+		db: tx,
+	}
+}
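A minimal sketch of wiring the generated Queries to a database/sql handle. The SQLite driver import and the "data.db" path are assumptions (any database/sql driver works, and the schema is assumed to be migrated already); the rest uses only the New, WithTx, and GetUserByID calls shown in this change.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "modernc.org/sqlite" // assumed driver; any database/sql driver works

	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
)

func main() {
	// Open the database; the file name is illustrative.
	conn, err := sql.Open("sqlite", "data.db")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// *sql.DB satisfies the DBTX interface, so it can be passed directly.
	q := sqlc.New(conn)

	// Queries can also be scoped to a transaction via WithTx.
	tx, err := conn.Begin()
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	u, err := q.WithTx(tx).GetUserByID(context.Background(), 1)
	if err != nil {
		log.Fatal(err) // returns sql.ErrNoRows if no such user exists
	}
	log.Println(u.Email)

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}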
pkg/models/sqlc/models.go (new file)
@@ -0,0 +1,25 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+//   sqlc v1.25.0
+
+package sqlc
+
+import (
+	"git.grosinger.net/tgrosinger/saasitone/pkg/db"
+)
+
+type PasswordToken struct {
+	ID                int     `db:"id"`
+	Hash              string  `db:"hash"`
+	CreatedAt         db.Time `db:"created_at"`
+	PasswordTokenUser int     `db:"password_token_user"`
+}
+
+type User struct {
+	ID        int     `db:"id"`
+	Name      string  `db:"name"`
+	Email     string  `db:"email"`
+	Password  string  `db:"password"`
+	Verified  int     `db:"verified"`
+	CreatedAt db.Time `db:"created_at"`
+}
pkg/models/sqlc/password_tokens.sql.go (new file)
@@ -0,0 +1,126 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+//   sqlc v1.25.0
+// source: password_tokens.sql
+
+package sqlc
+
+import (
+	"context"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/db"
+)
+
+const createPasswordToken = `-- name: CreatePasswordToken :one
+INSERT INTO password_tokens (
+  hash, password_token_user
+) VALUES (
+  ?, ?
+) RETURNING id, hash, created_at, password_token_user
+`
+
+type CreatePasswordTokenParams struct {
+	Hash              string `db:"hash"`
+	PasswordTokenUser int    `db:"password_token_user"`
+}
+
+func (q *Queries) CreatePasswordToken(ctx context.Context, arg CreatePasswordTokenParams) (PasswordToken, error) {
+	row := q.db.QueryRowContext(ctx, createPasswordToken, arg.Hash, arg.PasswordTokenUser)
+	var i PasswordToken
+	err := row.Scan(
+		&i.ID,
+		&i.Hash,
+		&i.CreatedAt,
+		&i.PasswordTokenUser,
+	)
+	return i, err
+}
+
+const deletePasswordTokens = `-- name: DeletePasswordTokens :exec
+DELETE FROM password_tokens
+WHERE password_token_user = ?
+`
+
+func (q *Queries) DeletePasswordTokens(ctx context.Context, passwordTokenUser int) error {
+	_, err := q.db.ExecContext(ctx, deletePasswordTokens, passwordTokenUser)
+	return err
+}
+
+const getAllPasswordTokensForUser = `-- name: GetAllPasswordTokensForUser :many
+SELECT id, hash, created_at, password_token_user FROM password_tokens
+WHERE password_token_user = ?
+`
+
+// GetAllPasswordTokensForUser retrieves all password tokens without checking expiration.
+func (q *Queries) GetAllPasswordTokensForUser(ctx context.Context, passwordTokenUser int) ([]PasswordToken, error) {
+	rows, err := q.db.QueryContext(ctx, getAllPasswordTokensForUser, passwordTokenUser)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var items []PasswordToken
+	for rows.Next() {
+		var i PasswordToken
+		if err := rows.Scan(
+			&i.ID,
+			&i.Hash,
+			&i.CreatedAt,
+			&i.PasswordTokenUser,
+		); err != nil {
+			return nil, err
+		}
+		items = append(items, i)
+	}
+	if err := rows.Close(); err != nil {
+		return nil, err
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return items, nil
+}
+
+const getValidPasswordToken = `-- name: GetValidPasswordToken :one
+SELECT id, hash, created_at, password_token_user FROM password_tokens
+WHERE
+  id = ?
+  AND password_token_user = ?
+  AND datetime(created_at) > datetime(?)
+LIMIT 1
+`
+
+type GetValidPasswordTokenParams struct {
+	ID                int         `db:"id"`
+	PasswordTokenUser int         `db:"password_token_user"`
+	Datetime          interface{} `db:"datetime"`
+}
+
+// GetValidPasswordToken returns only valid password tokens for the provided
+// user, and only if the created_at time is greater than the provided time.
+func (q *Queries) GetValidPasswordToken(ctx context.Context, arg GetValidPasswordTokenParams) (PasswordToken, error) {
+	row := q.db.QueryRowContext(ctx, getValidPasswordToken, arg.ID, arg.PasswordTokenUser, arg.Datetime)
+	var i PasswordToken
+	err := row.Scan(
+		&i.ID,
+		&i.Hash,
+		&i.CreatedAt,
+		&i.PasswordTokenUser,
+	)
+	return i, err
+}
+
+const updatePasswordTokenCreatedAt = `-- name: UpdatePasswordTokenCreatedAt :exec
+UPDATE password_tokens
+SET created_at = ?
+WHERE id = ?
+`
+
+type UpdatePasswordTokenCreatedAtParams struct {
+	CreatedAt db.Time `db:"created_at"`
+	ID        int     `db:"id"`
+}
+
+func (q *Queries) UpdatePasswordTokenCreatedAt(ctx context.Context, arg UpdatePasswordTokenCreatedAtParams) error {
+	_, err := q.db.ExecContext(ctx, updatePasswordTokenCreatedAt, arg.CreatedAt, arg.ID)
+	return err
+}
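One detail worth calling out: because the query wraps the parameter in datetime(?), sqlc infers the Datetime field as interface{}. The auth client later in this diff passes a time.Time cutoff; a hedged sketch of the same call, with the helper name and the expiration window being illustrative:

package sqlcusage // hypothetical package for illustration

import (
	"context"
	"database/sql"
	"errors"
	"time"

	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
)

// validToken looks up a non-expired token; the 60-minute window is illustrative.
func validToken(ctx context.Context, q *sqlc.Queries, userID, tokenID int) (*sqlc.PasswordToken, error) {
	cutoff := time.Now().Add(-60 * time.Minute)

	pt, err := q.GetValidPasswordToken(ctx, sqlc.GetValidPasswordTokenParams{
		ID:                tokenID,
		PasswordTokenUser: userID,
		Datetime:          cutoff, // interface{}: anything the driver can bind into datetime(?)
	})
	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil // no matching, unexpired token
	}
	if err != nil {
		return nil, err
	}
	return &pt, nil
}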
pkg/models/sqlc/users.sql.go (new file)
@@ -0,0 +1,105 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+//   sqlc v1.25.0
+// source: users.sql
+
+package sqlc
+
+import (
+	"context"
+)
+
+const createUser = `-- name: CreateUser :one
+INSERT INTO users (
+  name, email, password
+) VALUES (
+  ?, ?, ?
+) RETURNING id, name, email, password, verified, created_at
+`
+
+type CreateUserParams struct {
+	Name     string `db:"name"`
+	Email    string `db:"email"`
+	Password string `db:"password"`
+}
+
+func (q *Queries) CreateUser(ctx context.Context, arg CreateUserParams) (User, error) {
+	row := q.db.QueryRowContext(ctx, createUser, arg.Name, arg.Email, arg.Password)
+	var i User
+	err := row.Scan(
+		&i.ID,
+		&i.Name,
+		&i.Email,
+		&i.Password,
+		&i.Verified,
+		&i.CreatedAt,
+	)
+	return i, err
+}
+
+const getUserByEmail = `-- name: GetUserByEmail :one
+SELECT id, name, email, password, verified, created_at FROM users
+WHERE email = lower(?)
+LIMIT 1
+`
+
+func (q *Queries) GetUserByEmail(ctx context.Context, lower string) (User, error) {
+	row := q.db.QueryRowContext(ctx, getUserByEmail, lower)
+	var i User
+	err := row.Scan(
+		&i.ID,
+		&i.Name,
+		&i.Email,
+		&i.Password,
+		&i.Verified,
+		&i.CreatedAt,
+	)
+	return i, err
+}
+
+const getUserByID = `-- name: GetUserByID :one
+SELECT id, name, email, password, verified, created_at FROM users
+WHERE id = ?
+LIMIT 1
+`
+
+func (q *Queries) GetUserByID(ctx context.Context, id int) (User, error) {
+	row := q.db.QueryRowContext(ctx, getUserByID, id)
+	var i User
+	err := row.Scan(
+		&i.ID,
+		&i.Name,
+		&i.Email,
+		&i.Password,
+		&i.Verified,
+		&i.CreatedAt,
+	)
+	return i, err
+}
+
+const updateUserPassword = `-- name: UpdateUserPassword :exec
+UPDATE users
+SET password = ?
+WHERE id = ?
+`
+
+type UpdateUserPasswordParams struct {
+	Password string `db:"password"`
+	ID       int    `db:"id"`
+}
+
+func (q *Queries) UpdateUserPassword(ctx context.Context, arg UpdateUserPasswordParams) error {
+	_, err := q.db.ExecContext(ctx, updateUserPassword, arg.Password, arg.ID)
+	return err
+}
+
+const updateUserSetVerified = `-- name: UpdateUserSetVerified :exec
+UPDATE users
+SET verified = 1
+WHERE email = ?
+`
+
+func (q *Queries) UpdateUserSetVerified(ctx context.Context, email string) error {
+	_, err := q.db.ExecContext(ctx, updateUserSetVerified, email)
+	return err
+}
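For completeness, a short sketch of the user queries in use. The function, names, and values are illustrative and not taken from the repository; only the generated query calls and bcrypt (already used by the auth client in this change) are assumed.

package sqlcusage // hypothetical package for illustration

import (
	"context"
	"fmt"

	"golang.org/x/crypto/bcrypt"

	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
)

// registerAndVerify creates a user, then marks the address as verified.
func registerAndVerify(ctx context.Context, q *sqlc.Queries) error {
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		return err
	}

	u, err := q.CreateUser(ctx, sqlc.CreateUserParams{
		Name:     "Jane Doe",
		Email:    "jane@example.com",
		Password: string(hash),
	})
	if err != nil {
		return err
	}
	fmt.Println("created user", u.ID)

	// The generated query lower()s the bound value before comparing,
	// so this lookup matches the lowercase address stored above.
	if _, err := q.GetUserByEmail(ctx, "Jane@Example.com"); err != nil {
		return err
	}

	return q.UpdateUserSetVerified(ctx, u.Email)
}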
@@ -3,8 +3,9 @@ package msg
 import (
 	"github.com/gorilla/sessions"
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/log"
-	"github.com/mikestefanello/pagoda/pkg/session"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/log"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/session"
 )
 
 // Type is a message type
@@ -3,12 +3,11 @@ package msg
 import (
 	"testing"
 
-	"github.com/mikestefanello/pagoda/pkg/tests"
+	"github.com/labstack/echo/v4"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/labstack/echo/v4"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 func TestMsg(t *testing.T) {
@@ -1,19 +1,17 @@
 package page
 
 import (
-	"html/template"
 	"net/http"
 	"time"
 
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/pkg/context"
-	"github.com/mikestefanello/pagoda/pkg/htmx"
-	"github.com/mikestefanello/pagoda/pkg/msg"
-	"github.com/mikestefanello/pagoda/templates"
+	"github.com/labstack/echo/v4"
 
 	echomw "github.com/labstack/echo/v4/middleware"
 
-	"github.com/labstack/echo/v4"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/htmx"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/msg"
+	"git.grosinger.net/tgrosinger/saasitone/templates"
 )
 
 // Page consists of all data that will be used to render a page response for a given route.
@@ -40,6 +38,9 @@ type Page struct {
 	// URL stores the URL of the current request
 	URL string
 
+	// ToURL is a function to convert a route name and optional route parameters to a URL
+	ToURL func(name string, params ...interface{}) string
+
 	// Data stores whatever additional data that needs to be passed to the templates.
 	// This is what the handler uses to pass the content of the page.
 	Data any
@@ -68,7 +69,7 @@ type Page struct {
 	IsAuth bool
 
 	// AuthUser stores the authenticated user
-	AuthUser *ent.User
+	AuthUser *sqlc.User
 
 	// StatusCode stores the HTTP status code that will be returned
 	StatusCode int
@@ -128,6 +129,7 @@ func New(ctx echo.Context) Page {
 		Context:    ctx,
 		Path:       ctx.Request().URL.Path,
 		URL:        ctx.Request().URL.String(),
+		ToURL:      ctx.Echo().Reverse,
 		StatusCode: http.StatusOK,
 		Pager:      NewPager(ctx, DefaultItemsPerPage),
 		Headers:    make(map[string]string),
@@ -142,7 +144,7 @@ func New(ctx echo.Context) Page {
 
 	if u := ctx.Get(context.AuthenticatedUserKey); u != nil {
 		p.IsAuth = true
-		p.AuthUser = u.(*ent.User)
+		p.AuthUser = u.(*sqlc.User)
 	}
 
 	p.HTMX.Request = htmx.GetRequest(ctx)
@@ -152,11 +154,6 @@ func New(ctx echo.Context) Page {
 // GetMessages gets all flash messages for a given type.
 // This allows for easy access to flash messages from the templates.
-func (p Page) GetMessages(typ msg.Type) []template.HTML {
-	strs := msg.Get(p.Context, typ)
-	ret := make([]template.HTML, len(strs))
-	for k, v := range strs {
-		ret[k] = template.HTML(v)
-	}
-	return ret
+func (p Page) GetMessages(typ msg.Type) []string {
+	return msg.Get(p.Context, typ)
 }
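A hedged sketch of a handler using the two changes above: ToURL for reverse routing and the now string-typed GetMessages. The handler, route name, and msg.TypeSuccess constant are assumptions not shown in this diff; only page.New, Data, ToURL, and GetMessages are.

package handlers // hypothetical package for illustration

import (
	"github.com/labstack/echo/v4"

	"git.grosinger.net/tgrosinger/saasitone/pkg/msg"
	"git.grosinger.net/tgrosinger/saasitone/pkg/page"
)

func about(ctx echo.Context) error {
	p := page.New(ctx)
	p.Data = map[string]any{
		// Reverse-route by name; "home" is a hypothetical route name.
		"HomeURL": p.ToURL("home"),
		// Flash messages now come back as plain strings, so the template
		// decides how (or whether) to treat them as safe HTML.
		"Notices": p.GetMessages(msg.TypeSuccess),
	}
	// Rendering is omitted; the template renderer is not part of this diff.
	return nil
}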
@@ -5,13 +5,13 @@ import (
 	"testing"
 
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/pkg/context"
-	"github.com/mikestefanello/pagoda/pkg/msg"
-	"github.com/mikestefanello/pagoda/pkg/tests"
 
 	echomw "github.com/labstack/echo/v4/middleware"
 	"github.com/stretchr/testify/assert"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/context"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/msg"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 func TestNew(t *testing.T) {
@@ -31,7 +31,7 @@ func TestNew(t *testing.T) {
 	assert.False(t, p.Cache.Enabled)
 
 	ctx, _ = tests.NewContext(e, "/abc?def=123")
-	usr := &ent.User{
+	usr := &sqlc.User{
 		ID: 1,
 	}
 	ctx.Set(context.AuthenticatedUserKey, usr)
@@ -5,9 +5,9 @@ import (
 	"testing"
 
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/tests"
 
 	"github.com/stretchr/testify/assert"
 
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 func TestNewPager(t *testing.T) {
@@ -7,7 +7,8 @@ import (
 	"net/url"
 
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/htmx"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/htmx"
 )
 
 // Redirect is a helper to perform HTTP redirects.
@@ -6,10 +6,11 @@ import (
 	"testing"
 
 	"github.com/labstack/echo/v4"
-	"github.com/mikestefanello/pagoda/pkg/htmx"
-	"github.com/mikestefanello/pagoda/pkg/tests"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
+	"git.grosinger.net/tgrosinger/saasitone/pkg/htmx"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 func TestRedirect(t *testing.T) {
@@ -2,21 +2,19 @@ package services
 
 import (
 	"crypto/rand"
+	"database/sql"
 	"encoding/hex"
 	"errors"
 	"fmt"
 	"time"
 
 	"github.com/golang-jwt/jwt"
-	"github.com/mikestefanello/pagoda/config"
-	"github.com/mikestefanello/pagoda/ent"
-	"github.com/mikestefanello/pagoda/ent/passwordtoken"
-	"github.com/mikestefanello/pagoda/ent/user"
-	"github.com/mikestefanello/pagoda/pkg/context"
-	"github.com/mikestefanello/pagoda/pkg/session"
 
 	"github.com/labstack/echo/v4"
 	"golang.org/x/crypto/bcrypt"
+
+	"git.grosinger.net/tgrosinger/saasitone/config"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/session"
 )
 
 const (
@@ -49,14 +47,14 @@ func (e InvalidPasswordTokenError) Error() string {
 // AuthClient is the client that handles authentication requests
 type AuthClient struct {
 	config *config.Config
-	orm    *ent.Client
+	db     *DBClient
 }
 
 // NewAuthClient creates a new authentication client
-func NewAuthClient(cfg *config.Config, orm *ent.Client) *AuthClient {
+func NewAuthClient(cfg *config.Config, db *DBClient) *AuthClient {
 	return &AuthClient{
 		config: cfg,
-		orm:    orm,
+		db:     db,
 	}
 }
 
@@ -96,11 +94,13 @@ func (c *AuthClient) GetAuthenticatedUserID(ctx echo.Context) (int, error) {
 }
 
 // GetAuthenticatedUser returns the authenticated user if the user is logged in
-func (c *AuthClient) GetAuthenticatedUser(ctx echo.Context) (*ent.User, error) {
+func (c *AuthClient) GetAuthenticatedUser(ctx echo.Context) (*sqlc.User, error) {
 	if userID, err := c.GetAuthenticatedUserID(ctx); err == nil {
-		return c.orm.User.Query().
-			Where(user.ID(userID)).
-			Only(ctx.Request().Context())
+		u, err := c.db.C.GetUserByID(ctx.Request().Context(), userID)
+		if err != nil {
+			return nil, err
+		}
+		return &u, nil
 	}
 
 	return nil, NotAuthenticatedError{}
|
|||||||
// For security purposes, the token itself is not stored in the database but rather
|
// For security purposes, the token itself is not stored in the database but rather
|
||||||
// a hash of the token, exactly how passwords are handled. This method returns both
|
// a hash of the token, exactly how passwords are handled. This method returns both
|
||||||
// the generated token as well as the token entity which only contains the hash.
|
// the generated token as well as the token entity which only contains the hash.
|
||||||
func (c *AuthClient) GeneratePasswordResetToken(ctx echo.Context, userID int) (string, *ent.PasswordToken, error) {
|
func (c *AuthClient) GeneratePasswordResetToken(ctx echo.Context, userID int) (string, sqlc.PasswordToken, error) {
|
||||||
// Generate the token, which is what will go in the URL, but not the database
|
// Generate the token, which is what will go in the URL, but not the database
|
||||||
token, err := c.RandomToken(c.config.App.PasswordToken.Length)
|
token, err := c.RandomToken(c.config.App.PasswordToken.Length)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", sqlc.PasswordToken{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Hash the token, which is what will be stored in the database
|
// Hash the token, which is what will be stored in the database
|
||||||
hash, err := c.HashPassword(token)
|
hash, err := c.HashPassword(token)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", sqlc.PasswordToken{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create and save the password reset token
|
// Create and save the password reset token
|
||||||
pt, err := c.orm.PasswordToken.
|
pt, err := c.db.C.CreatePasswordToken(ctx.Request().Context(), sqlc.CreatePasswordTokenParams{
|
||||||
Create().
|
Hash: hash,
|
||||||
SetHash(hash).
|
PasswordTokenUser: userID,
|
||||||
SetUserID(userID).
|
})
|
||||||
Save(ctx.Request().Context())
|
|
||||||
|
|
||||||
return token, pt, err
|
return token, pt, err
|
||||||
}
|
}
|
||||||
@@ -150,43 +149,35 @@ func (c *AuthClient) GeneratePasswordResetToken(ctx echo.Context, userID int) (s
 // GetValidPasswordToken returns a valid, non-expired password token entity for a given user, token ID and token.
 // Since the actual token is not stored in the database for security purposes, if a matching password token entity is
 // found a hash of the provided token is compared with the hash stored in the database in order to validate.
-func (c *AuthClient) GetValidPasswordToken(ctx echo.Context, userID, tokenID int, token string) (*ent.PasswordToken, error) {
+func (c *AuthClient) GetValidPasswordToken(ctx echo.Context, userID, tokenID int, token string) (*sqlc.PasswordToken, error) {
 	// Ensure expired tokens are never returned
 	expiration := time.Now().Add(-c.config.App.PasswordToken.Expiration)
 
 	// Query to find a password token entity that matches the given user and token ID
-	pt, err := c.orm.PasswordToken.
-		Query().
-		Where(passwordtoken.ID(tokenID)).
-		Where(passwordtoken.HasUserWith(user.ID(userID))).
-		Where(passwordtoken.CreatedAtGTE(expiration)).
-		Only(ctx.Request().Context())
-
-	switch err.(type) {
-	case *ent.NotFoundError:
-	case nil:
-		// Check the token for a hash match
-		if err := c.CheckPassword(token, pt.Hash); err == nil {
-			return pt, nil
-		}
-	default:
-		if !context.IsCanceledError(err) {
-			return nil, err
-		}
-	}
+	pt, err := c.db.C.GetValidPasswordToken(ctx.Request().Context(),
+		sqlc.GetValidPasswordTokenParams{
+			ID:                tokenID,
+			PasswordTokenUser: userID,
+			Datetime:          expiration,
+		})
+
+	if err == sql.ErrNoRows {
+		return nil, InvalidPasswordTokenError{}
+	} else if err != nil {
+		return nil, err
+	}
+
+	// Check the token for a hash match
+	if err := c.CheckPassword(token, pt.Hash); err == nil {
+		return &pt, nil
+	}
 	return nil, InvalidPasswordTokenError{}
 }
 
 // DeletePasswordTokens deletes all password tokens in the database for a belonging to a given user.
 // This should be called after a successful password reset.
 func (c *AuthClient) DeletePasswordTokens(ctx echo.Context, userID int) error {
-	_, err := c.orm.PasswordToken.
-		Delete().
-		Where(passwordtoken.HasUserWith(user.ID(userID))).
-		Exec(ctx.Request().Context())
-
-	return err
+	return c.db.C.DeletePasswordTokens(ctx.Request().Context(), userID)
 }
 
 // RandomToken generates a random token string of a given length
|
|||||||
|
|
||||||
return []byte(c.config.App.EncryptionKey), nil
|
return []byte(c.config.App.EncryptionKey), nil
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
@@ -6,12 +6,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/mikestefanello/pagoda/ent/passwordtoken"
-	"github.com/mikestefanello/pagoda/ent/user"
+	"github.com/stretchr/testify/assert"
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/stretchr/testify/assert"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+	"git.grosinger.net/tgrosinger/saasitone/pkg/db"
 )
 
 func TestAuthClient_Auth(t *testing.T) {
@@ -70,13 +69,11 @@ func TestAuthClient_GetValidPasswordToken(t *testing.T) {
 	assert.Equal(t, pt.ID, pt2.ID)
 
 	// Expire the token by pushing the date far enough back
-	count, err := c.ORM.PasswordToken.
-		Update().
-		SetCreatedAt(time.Now().Add(-(c.Config.App.PasswordToken.Expiration + time.Hour))).
-		Where(passwordtoken.ID(pt.ID)).
-		Save(context.Background())
+	err = c.DB.C.UpdatePasswordTokenCreatedAt(context.Background(), sqlc.UpdatePasswordTokenCreatedAtParams{
+		CreatedAt: db.Time{Time: time.Now().Add(-(c.Config.App.PasswordToken.Expiration + time.Hour))},
+		ID:        pt.ID,
+	})
 	require.NoError(t, err)
-	require.Equal(t, 1, count)
 
 	// Expired tokens should not be valid
 	_, err = c.Auth.GetValidPasswordToken(ctx, usr.ID, pt.ID, token)
@@ -95,13 +92,9 @@ func TestAuthClient_DeletePasswordTokens(t *testing.T) {
 	require.NoError(t, err)
 
 	// Check that no tokens remain
-	count, err := c.ORM.PasswordToken.
-		Query().
-		Where(passwordtoken.HasUserWith(user.ID(usr.ID))).
-		Count(context.Background())
-
+	tokens, err := c.DB.C.GetAllPasswordTokensForUser(context.Background(), usr.ID)
 	require.NoError(t, err)
-	assert.Equal(t, 0, count)
+	assert.Len(t, tokens, 0)
 }
 
 func TestAuthClient_RandomToken(t *testing.T) {
@@ -4,28 +4,39 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sync"
 	"time"
 
-	"github.com/eko/gocache/lib/v4/cache"
-	"github.com/eko/gocache/lib/v4/marshaler"
-	libstore "github.com/eko/gocache/lib/v4/store"
-	redisstore "github.com/eko/gocache/store/redis/v4"
-	"github.com/mikestefanello/pagoda/config"
-	"github.com/redis/go-redis/v9"
+	"github.com/maypok86/otter"
 )
 
-type (
-	// CacheClient is the client that allows you to interact with the cache
-	CacheClient struct {
-		// Client stores the client to the underlying cache service
-		Client *redis.Client
+// ErrCacheMiss indicates that the requested key does not exist in the cache
+var ErrCacheMiss = errors.New("cache miss")
 
-		// cache stores the cache interface
-		cache *cache.Cache[any]
+type (
+	// CacheStore provides an interface for cache storage
+	CacheStore interface {
+		// get attempts to get a cached value
+		get(context.Context, *CacheGetOp) (any, error)
+
+		// set attempts to set an entry in the cache
+		set(context.Context, *CacheSetOp) error
+
+		// flush removes a given key and/or tags from the cache
+		flush(context.Context, *CacheFlushOp) error
+
+		// close shuts down the cache storage
+		close()
 	}
 
-	// cacheSet handles chaining a set operation
-	cacheSet struct {
+	// CacheClient is the client that allows you to interact with the cache
+	CacheClient struct {
+		// store holds the Cache storage
+		store CacheStore
+	}
+
+	// CacheSetOp handles chaining a set operation
+	CacheSetOp struct {
 		client     *CacheClient
 		key        string
 		group      string
@@ -34,76 +45,69 @@ type (
 		tags       []string
 	}
 
-	// cacheGet handles chaining a get operation
-	cacheGet struct {
+	// CacheGetOp handles chaining a get operation
+	CacheGetOp struct {
 		client   *CacheClient
 		key      string
 		group    string
-		dataType any
 	}
 
-	// cacheFlush handles chaining a flush operation
-	cacheFlush struct {
+	// CacheFlushOp handles chaining a flush operation
+	CacheFlushOp struct {
 		client *CacheClient
 		key    string
 		group  string
 		tags   []string
 	}
+
+	// inMemoryCacheStore is a cache store implementation in memory
+	inMemoryCacheStore struct {
+		store    *otter.CacheWithVariableTTL[string, any]
+		tagIndex *tagIndex
+	}
+
+	// tagIndex maintains an index to support cache tags for in-memory cache stores.
+	// There is a performance and memory impact to using cache tags since set and get operations using tags will require
+	// locking, and we need to keep track of this index in order to keep everything in sync.
+	// If using something like Redis for caching, you can leverage sets to store the index.
+	// Cache tags can be useful and convenient, so you should decide if your app benefits enough from this.
+	// As it stands here, there is no limiting how much memory this will consume and it will track all keys
+	// and tags added and removed from the cache. You could store these in the cache itself but allowing these to
+	// be evicted poses challenges.
+	tagIndex struct {
+		sync.Mutex
+		tags map[string]map[string]struct{} // tag->keys
+		keys map[string]map[string]struct{} // key->tags
+	}
 )
 
 // NewCacheClient creates a new cache client
-func NewCacheClient(cfg *config.Config) (*CacheClient, error) {
-	// Determine the database based on the environment
-	db := cfg.Cache.Database
-	if cfg.App.Environment == config.EnvTest {
-		db = cfg.Cache.TestDatabase
-	}
-
-	// Connect to the cache
-	c := &CacheClient{}
-	c.Client = redis.NewClient(&redis.Options{
-		Addr:     fmt.Sprintf("%s:%d", cfg.Cache.Hostname, cfg.Cache.Port),
-		Password: cfg.Cache.Password,
-		DB:       db,
-	})
-	if _, err := c.Client.Ping(context.Background()).Result(); err != nil {
-		return c, err
-	}
-
-	// Flush the database if this is the test environment
-	if cfg.App.Environment == config.EnvTest {
-		if err := c.Client.FlushDB(context.Background()).Err(); err != nil {
-			return c, err
-		}
-	}
-
-	cacheStore := redisstore.NewRedis(c.Client)
-	c.cache = cache.New[any](cacheStore)
-	return c, nil
+func NewCacheClient(store CacheStore) *CacheClient {
+	return &CacheClient{store: store}
 }
 
 // Close closes the connection to the cache
-func (c *CacheClient) Close() error {
-	return c.Client.Close()
+func (c *CacheClient) Close() {
+	c.store.close()
 }
 
 // Set creates a cache set operation
-func (c *CacheClient) Set() *cacheSet {
-	return &cacheSet{
+func (c *CacheClient) Set() *CacheSetOp {
+	return &CacheSetOp{
 		client: c,
 	}
}
 
 // Get creates a cache get operation
-func (c *CacheClient) Get() *cacheGet {
-	return &cacheGet{
+func (c *CacheClient) Get() *CacheGetOp {
+	return &CacheGetOp{
 		client: c,
 	}
 }
 
 // Flush creates a cache flush operation
-func (c *CacheClient) Flush() *cacheFlush {
-	return &cacheFlush{
+func (c *CacheClient) Flush() *CacheFlushOp {
+	return &CacheFlushOp{
 		client: c,
 	}
 }
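A short usage sketch of the rebuilt client, written as it would appear inside the services package (newInMemoryCache is unexported, so construction outside the package would go through whatever the container does, which is not shown here). The capacity, group, key, and TTL values are illustrative; the fluent methods are the ones defined in this change.

// Sketch only: exercising the fluent cache API with the in-memory store.
func exampleCacheUsage(ctx context.Context) error {
	store, err := newInMemoryCache(1000) // capacity is illustrative
	if err != nil {
		return err
	}
	client := NewCacheClient(store)
	defer client.Close()

	// Store a value under a group and key with a TTL and a tag.
	err = client.Set().
		Group("page").
		Key("/about").
		Data("<html>cached</html>").
		Expiration(10 * time.Minute).
		Tags("pages").
		Save(ctx)
	if err != nil {
		return err
	}

	// Read it back; a missing key surfaces as ErrCacheMiss.
	if _, err := client.Get().Group("page").Key("/about").Fetch(ctx); err != nil {
		return err
	}

	// Invalidate everything tagged "pages".
	return client.Flush().Tags("pages").Execute(ctx)
}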
@@ -117,111 +121,231 @@ func (c *CacheClient) cacheKey(group, key string) string {
 }
 
 // Key sets the cache key
-func (c *cacheSet) Key(key string) *cacheSet {
+func (c *CacheSetOp) Key(key string) *CacheSetOp {
     c.key = key
     return c
 }
 
 // Group sets the cache group
-func (c *cacheSet) Group(group string) *cacheSet {
+func (c *CacheSetOp) Group(group string) *CacheSetOp {
     c.group = group
     return c
 }
 
 // Data sets the data to cache
-func (c *cacheSet) Data(data any) *cacheSet {
+func (c *CacheSetOp) Data(data any) *CacheSetOp {
     c.data = data
     return c
 }
 
 // Expiration sets the expiration duration of the cached data
-func (c *cacheSet) Expiration(expiration time.Duration) *cacheSet {
+func (c *CacheSetOp) Expiration(expiration time.Duration) *CacheSetOp {
     c.expiration = expiration
     return c
 }
 
 // Tags sets the cache tags
-func (c *cacheSet) Tags(tags ...string) *cacheSet {
+func (c *CacheSetOp) Tags(tags ...string) *CacheSetOp {
     c.tags = tags
     return c
 }
 
 // Save saves the data in the cache
-func (c *cacheSet) Save(ctx context.Context) error {
-    if c.key == "" {
+func (c *CacheSetOp) Save(ctx context.Context) error {
+    switch {
+    case c.key == "":
         return errors.New("no cache key specified")
+    case c.data == nil:
+        return errors.New("no cache data specified")
+    case c.expiration == 0:
+        return errors.New("no cache expiration specified")
     }
 
-    opts := []libstore.Option{
-        libstore.WithExpiration(c.expiration),
-        libstore.WithTags(c.tags),
-    }
-
-    return marshaler.
-        New(c.client.cache).
-        Set(ctx, c.client.cacheKey(c.group, c.key), c.data, opts...)
+    return c.client.store.set(ctx, c)
 }
 
 // Key sets the cache key
-func (c *cacheGet) Key(key string) *cacheGet {
+func (c *CacheGetOp) Key(key string) *CacheGetOp {
     c.key = key
     return c
 }
 
 // Group sets the cache group
-func (c *cacheGet) Group(group string) *cacheGet {
+func (c *CacheGetOp) Group(group string) *CacheGetOp {
     c.group = group
     return c
 }
 
-// Type sets the expected Go type of the data being retrieved from the cache
-func (c *cacheGet) Type(expectedType any) *cacheGet {
-    c.dataType = expectedType
-    return c
-}
-
 // Fetch fetches the data from the cache
-func (c *cacheGet) Fetch(ctx context.Context) (any, error) {
+func (c *CacheGetOp) Fetch(ctx context.Context) (any, error) {
     if c.key == "" {
         return nil, errors.New("no cache key specified")
     }
 
-    return marshaler.New(c.client.cache).Get(
-        ctx,
-        c.client.cacheKey(c.group, c.key),
-        c.dataType,
-    )
+    return c.client.store.get(ctx, c)
 }
 
 // Key sets the cache key
-func (c *cacheFlush) Key(key string) *cacheFlush {
+func (c *CacheFlushOp) Key(key string) *CacheFlushOp {
     c.key = key
     return c
 }
 
 // Group sets the cache group
-func (c *cacheFlush) Group(group string) *cacheFlush {
+func (c *CacheFlushOp) Group(group string) *CacheFlushOp {
     c.group = group
     return c
 }
 
 // Tags sets the cache tags
-func (c *cacheFlush) Tags(tags ...string) *cacheFlush {
+func (c *CacheFlushOp) Tags(tags ...string) *CacheFlushOp {
     c.tags = tags
     return c
 }
 
 // Execute flushes the data from the cache
-func (c *cacheFlush) Execute(ctx context.Context) error {
-    if len(c.tags) > 0 {
-        if err := c.client.cache.Invalidate(ctx, libstore.WithInvalidateTags(c.tags)); err != nil {
-            return err
-        }
-    }
-
-    if c.key != "" {
-        return c.client.cache.Delete(ctx, c.client.cacheKey(c.group, c.key))
-    }
-
-    return nil
+func (c *CacheFlushOp) Execute(ctx context.Context) error {
+    return c.client.store.flush(ctx, c)
 }
+
+// newInMemoryCache creates a new in-memory CacheStore
+func newInMemoryCache(capacity int) (CacheStore, error) {
+    s := &inMemoryCacheStore{
+        tagIndex: newTagIndex(),
+    }
+
+    store, err := otter.MustBuilder[string, any](capacity).
+        WithVariableTTL().
+        DeletionListener(func(key string, value any, cause otter.DeletionCause) {
+            s.tagIndex.purgeKeys(key)
+        }).
+        Build()
+
+    if err != nil {
+        return nil, err
+    }
+
+    s.store = &store
+
+    return s, nil
+}
+
+func (s *inMemoryCacheStore) get(_ context.Context, op *CacheGetOp) (any, error) {
+    v, exists := s.store.Get(op.client.cacheKey(op.group, op.key))
+
+    if !exists {
+        return nil, ErrCacheMiss
+    }
+
+    return v, nil
+}
+
+func (s *inMemoryCacheStore) set(_ context.Context, op *CacheSetOp) error {
+    key := op.client.cacheKey(op.group, op.key)
+
+    added := s.store.Set(
+        key,
+        op.data,
+        op.expiration,
+    )
+
+    if len(op.tags) > 0 {
+        s.tagIndex.setTags(key, op.tags...)
+    }
+
+    if !added {
+        return errors.New("cache set failed")
+    }
+
+    return nil
+}
+
+func (s *inMemoryCacheStore) flush(_ context.Context, op *CacheFlushOp) error {
+    keys := make([]string, 0)
+
+    if key := op.client.cacheKey(op.group, op.key); key != "" {
+        keys = append(keys, key)
+    }
+
+    if len(op.tags) > 0 {
+        keys = append(keys, s.tagIndex.purgeTags(op.tags...)...)
+    }
+
+    for _, key := range keys {
+        s.store.Delete(key)
+    }
+
+    s.tagIndex.purgeKeys(keys...)
+
+    return nil
+}
+
+func (s *inMemoryCacheStore) close() {
+    s.store.Close()
+}
+
+func newTagIndex() *tagIndex {
+    return &tagIndex{
+        tags: make(map[string]map[string]struct{}),
+        keys: make(map[string]map[string]struct{}),
+    }
+}
+
+func (i *tagIndex) setTags(key string, tags ...string) {
+    i.Lock()
+    defer i.Unlock()
+
+    if _, exists := i.keys[key]; !exists {
+        i.keys[key] = make(map[string]struct{})
+    }
+
+    for _, tag := range tags {
+        if _, exists := i.tags[tag]; !exists {
+            i.tags[tag] = make(map[string]struct{})
+        }
+        i.tags[tag][key] = struct{}{}
+        i.keys[key][tag] = struct{}{}
+    }
+}
+
+func (i *tagIndex) purgeTags(tags ...string) []string {
+    i.Lock()
+    defer i.Unlock()
+
+    keys := make([]string, 0)
+
+    for _, tag := range tags {
+        if tagKeys, exists := i.tags[tag]; exists {
+            delete(i.tags, tag)
+
+            for key := range tagKeys {
+                delete(i.keys[key], tag)
+                if len(i.keys[key]) == 0 {
+                    delete(i.keys, key)
+                }
+
+                keys = append(keys, key)
+            }
+        }
+    }
+
+    return keys
+}
+
+func (i *tagIndex) purgeKeys(keys ...string) {
+    i.Lock()
+    defer i.Unlock()
+
+    for _, key := range keys {
+        if keyTags, exists := i.keys[key]; exists {
+            delete(i.keys, key)
+
+            for tag := range keyTags {
+                delete(i.tags[tag], key)
+                if len(i.tags[tag]) == 0 {
+                    delete(i.tags, tag)
+                }
+            }
+        }
+    }
+}
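The fluent operations above compose into a small read/write/flush API. As a usage sketch (not part of this diff) of how a caller inside the services package might exercise it, assuming a *CacheClient built on the in-memory store shown here:

```go
package services

import (
	"context"
	"time"
)

// illustrateCache is a sketch only: it stores a value with a tag and a TTL,
// then reads it back. A missing or expired entry surfaces as ErrCacheMiss.
func illustrateCache(ctx context.Context, cache *CacheClient) (any, error) {
	if err := cache.
		Set().
		Group("pages").
		Key("/home").
		Data("<html>example</html>").
		Tags("page").
		Expiration(10 * time.Minute).
		Save(ctx); err != nil {
		return nil, err
	}

	return cache.
		Get().
		Group("pages").
		Key("/home").
		Fetch(ctx)
}
```

Note that the Type() builder from the previous API is gone: Fetch now returns the value exactly as it was stored, so callers type-assert to the original Go type rather than decoding into a pointer.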
@@ -2,11 +2,9 @@ package services
 
 import (
     "context"
-    "errors"
     "testing"
     "time"
 
-    libstore "github.com/eko/gocache/lib/v4/store"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -15,6 +13,7 @@ func TestCacheClient(t *testing.T) {
     type cacheTest struct {
         Value string
     }
 
     // Cache some data
     data := cacheTest{Value: "abcdef"}
     group := "testgroup"
@@ -24,6 +23,7 @@ func TestCacheClient(t *testing.T) {
         Group(group).
         Key(key).
         Data(data).
+        Expiration(500 * time.Millisecond).
         Save(context.Background())
     require.NoError(t, err)
@@ -32,20 +32,18 @@ func TestCacheClient(t *testing.T) {
         Get().
         Group(group).
         Key(key).
-        Type(new(cacheTest)).
         Fetch(context.Background())
     require.NoError(t, err)
-    cast, ok := fromCache.(*cacheTest)
+    cast, ok := fromCache.(cacheTest)
     require.True(t, ok)
-    assert.Equal(t, data, *cast)
+    assert.Equal(t, data, cast)
 
     // The same key with the wrong group should fail
     _, err = c.Cache.
         Get().
         Key(key).
-        Type(new(cacheTest)).
         Fetch(context.Background())
-    assert.Error(t, err)
+    assert.Equal(t, ErrCacheMiss, err)
 
     // Flush the data
     err = c.Cache.
@@ -56,29 +54,42 @@ func TestCacheClient(t *testing.T) {
     require.NoError(t, err)
 
     // The data should be gone
-    assertFlushed := func() {
+    assertFlushed := func(key string) {
         // The data should be gone
         _, err = c.Cache.
             Get().
             Group(group).
             Key(key).
-            Type(new(cacheTest)).
             Fetch(context.Background())
-        assert.True(t, errors.Is(err, &libstore.NotFound{}))
+        assert.Equal(t, ErrCacheMiss, err)
     }
-    assertFlushed()
+    assertFlushed(key)
 
     // Set with tags
+    key = "testkey2"
     err = c.Cache.
         Set().
         Group(group).
         Key(key).
         Data(data).
-        Tags("tag1").
+        Tags("tag1", "tag2").
+        Expiration(time.Hour).
         Save(context.Background())
     require.NoError(t, err)
 
-    // Flush the tag
+    // Check the tag index
+    index := c.Cache.store.(*inMemoryCacheStore).tagIndex
+    gk := c.Cache.cacheKey(group, key)
+    _, exists := index.tags["tag1"][gk]
+    assert.True(t, exists)
+    _, exists = index.tags["tag2"][gk]
+    assert.True(t, exists)
+    _, exists = index.keys[gk]["tag1"]
+    assert.True(t, exists)
+    _, exists = index.keys[gk]["tag2"]
+    assert.True(t, exists)
+
+    // Flush one of tags
     err = c.Cache.
         Flush().
         Tags("tag1").
@@ -86,21 +97,9 @@ func TestCacheClient(t *testing.T) {
     require.NoError(t, err)
 
     // The data should be gone
-    assertFlushed()
+    assertFlushed(key)
 
-    // Set with expiration
-    err = c.Cache.
-        Set().
-        Group(group).
-        Key(key).
-        Data(data).
-        Expiration(time.Millisecond).
-        Save(context.Background())
-    require.NoError(t, err)
-
-    // Wait for expiration
-    time.Sleep(time.Millisecond * 2)
-
-    // The data should be gone
-    assertFlushed()
+    // The index should be empty
+    assert.Empty(t, index.tags)
+    assert.Empty(t, index.keys)
 }
@@ -1,24 +1,14 @@
 package services
 
 import (
-    "context"
-    "database/sql"
     "fmt"
     "log/slog"
 
-    "entgo.io/ent/dialect"
-    entsql "entgo.io/ent/dialect/sql"
-    "entgo.io/ent/dialect/sql/schema"
-    "github.com/mikestefanello/pagoda/pkg/funcmap"
-
-    // Required by ent
-    _ "github.com/jackc/pgx/v4/stdlib"
     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/ent"
+    _ "github.com/mattn/go-sqlite3"
 
-    // Require by ent
-    _ "github.com/mikestefanello/pagoda/ent/runtime"
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/funcmap"
 )
 
 // Container contains all services used by the application and provides an easy way to handle dependency
@@ -36,11 +26,8 @@ type Container struct {
     // Cache contains the cache client
     Cache *CacheClient
 
-    // Database stores the connection to the database
-    Database *sql.DB
+    // DB is the connection to the database and models for interacting with it
+    DB *DBClient
 
-    // ORM stores a client to the ORM
-    ORM *ent.Client
-
     // Mail stores an email sending client
     Mail *MailClient
@@ -63,7 +50,6 @@ func NewContainer() *Container {
     c.initWeb()
     c.initCache()
     c.initDatabase()
-    c.initORM()
     c.initAuth()
     c.initTemplateRenderer()
     c.initMail()
@@ -71,20 +57,13 @@
     return c
 }
 
-// Shutdown shuts the Container down and disconnects all connections
+// Shutdown shuts the Container down and disconnects all connections.
+// If the task runner was started, cancel the context to shut it down prior to calling this.
 func (c *Container) Shutdown() error {
-    if err := c.Tasks.Close(); err != nil {
-        return err
-    }
-    if err := c.Cache.Close(); err != nil {
-        return err
-    }
-    if err := c.ORM.Close(); err != nil {
-        return err
-    }
-    if err := c.Database.Close(); err != nil {
+    if err := c.DB.Close(); err != nil {
         return err
     }
+    c.Cache.Close()
 
     return nil
 }
@@ -120,65 +99,27 @@ func (c *Container) initWeb() {
 
 // initCache initializes the cache
 func (c *Container) initCache() {
-    var err error
-    if c.Cache, err = NewCacheClient(c.Config); err != nil {
+    store, err := newInMemoryCache(c.Config.Cache.Capacity)
+    if err != nil {
         panic(err)
     }
+
+    c.Cache = NewCacheClient(store)
 }
 
 // initDatabase initializes the database
-// If the environment is set to test, the test database will be used and will be dropped, recreated and migrated
 func (c *Container) initDatabase() {
-    var err error
+    client, err := NewDBClient(c.Config)
 
-    getAddr := func(dbName string) string {
-        return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s",
-            c.Config.Database.User,
-            c.Config.Database.Password,
-            c.Config.Database.Hostname,
-            c.Config.Database.Port,
-            dbName,
-        )
-    }
-
-    c.Database, err = sql.Open("pgx", getAddr(c.Config.Database.Database))
     if err != nil {
-        panic(fmt.Sprintf("failed to connect to database: %v", err))
+        panic(err)
     }
 
-    // Check if this is a test environment
-    if c.Config.App.Environment == config.EnvTest {
-        // Drop the test database, ignoring errors in case it doesn't yet exist
-        _, _ = c.Database.Exec("DROP DATABASE " + c.Config.Database.TestDatabase)
-
-        // Create the test database
-        if _, err = c.Database.Exec("CREATE DATABASE " + c.Config.Database.TestDatabase); err != nil {
-            panic(fmt.Sprintf("failed to create test database: %v", err))
-        }
-
-        // Connect to the test database
-        if err = c.Database.Close(); err != nil {
-            panic(fmt.Sprintf("failed to close database connection: %v", err))
-        }
-        c.Database, err = sql.Open("pgx", getAddr(c.Config.Database.TestDatabase))
-        if err != nil {
-            panic(fmt.Sprintf("failed to connect to database: %v", err))
-        }
-    }
-}
-
-// initORM initializes the ORM
-func (c *Container) initORM() {
-    drv := entsql.OpenDB(dialect.Postgres, c.Database)
-    c.ORM = ent.NewClient(ent.Driver(drv))
-    if err := c.ORM.Schema.Create(context.Background(), schema.WithAtlas(true)); err != nil {
-        panic(fmt.Sprintf("failed to create database schema: %v", err))
-    }
+    c.DB = client
 }
 
 // initAuth initializes the authentication client
 func (c *Container) initAuth() {
-    c.Auth = NewAuthClient(c.Config, c.ORM)
+    c.Auth = NewAuthClient(c.Config, c.DB)
 }
 
 // initTemplateRenderer initializes the template renderer
@@ -197,5 +138,11 @@ func (c *Container) initMail() {
 
 // initTasks initializes the task client
 func (c *Container) initTasks() {
-    c.Tasks = NewTaskClient(c.Config)
+    var err error
+    // You could use a separate database for tasks, if you'd like. but using one
+    // makes transaction support easier
+    c.Tasks, err = NewTaskClient(c.Config.Tasks, c.DB.DB())
+    if err != nil {
+        panic(fmt.Sprintf("failed to create task client: %v", err))
+    }
 }
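The new Shutdown contract (cancel the task runner first, then close the container) suggests the following startup/teardown shape. This is a sketch under assumptions, not code from this diff; only NewContainer, Tasks.StartRunner, and Shutdown are taken from the changes above:

```go
package main

import (
	"context"

	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
)

// run sketches the intended lifecycle: build the container, run the blocking
// task runner in its own goroutine under a cancellable context, and on exit
// cancel the runner before shutting the container down.
func run() error {
	c := services.NewContainer()

	ctx, cancel := context.WithCancel(context.Background())
	go c.Tasks.StartRunner(ctx)

	// ... start the web server and wait for an interrupt signal here ...

	cancel()            // stop the task runner first
	return c.Shutdown() // then close the database and cache
}
```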
@@ -11,8 +11,7 @@ func TestNewContainer(t *testing.T) {
     assert.NotNil(t, c.Config)
     assert.NotNil(t, c.Validator)
     assert.NotNil(t, c.Cache)
-    assert.NotNil(t, c.Database)
-    assert.NotNil(t, c.ORM)
+    assert.NotNil(t, c.DB)
     assert.NotNil(t, c.Mail)
     assert.NotNil(t, c.Auth)
     assert.NotNil(t, c.TemplateRenderer)
119 pkg/services/db.go Normal file
@@ -0,0 +1,119 @@
package services

import (
    "database/sql"
    "fmt"
    "log/slog"

    "github.com/golang-migrate/migrate/v4"
    "github.com/golang-migrate/migrate/v4/database/sqlite3"
    "github.com/golang-migrate/migrate/v4/source/file"

    "git.grosinger.net/tgrosinger/saasitone/config"
    "git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
)

type DBClient struct {
    db *sql.DB
    C  *sqlc.Queries

    User *DBUserClient
    Post *DBPostClient
}

func NewDBClient(cfg *config.Config) (*DBClient, error) {
    logger := slog.Default()

    dbFilepath := cfg.Storage.DatabaseFile
    if cfg.App.Environment == config.EnvTest {
        // In memory only
        dbFilepath = ":memory:"
    }

    logger.Info("Opening database file",
        "filepath", dbFilepath)

    fn := fmt.Sprintf("file:%s?_fk=1&_journal=WAL&cache=shared&_busy_timeout=5000", dbFilepath)
    db, err := sql.Open("sqlite3", fn)
    if err != nil {
        return nil, err
    }

    client := DBClient{
        db: db,
        C:  sqlc.New(db),
    }
    client.User = &DBUserClient{db: db}
    client.Post = &DBPostClient{db: db}

    migrationsDirPath := cfg.Storage.MigrationsDir
    logger.Info("Loading schema migrations",
        "filepath", migrationsDirPath)
    err = client.initSchema(migrationsDirPath)
    if err != nil {
        return nil, err
    }

    return &client, nil
}

// initSchema ensures that the database is current with the migrations contained
// in db/migrations.
func (c *DBClient) initSchema(migrationsDir string) error {
    driver, err := sqlite3.WithInstance(c.db, &sqlite3.Config{})
    if err != nil {
        return err
    }

    fSrc, err := (&file.File{}).Open(migrationsDir)
    if err != nil {
        fmt.Println("Got here 2: " + migrationsDir)
        return err
    }

    m, err := migrate.NewWithInstance("file", fSrc, "sqlite", driver)
    if err != nil {
        return err
    }

    err = m.Up()
    if err == migrate.ErrNoChange {
        return nil
    }
    return err
}

// WithTx executes the provided callback with access to a database transaction.
// If the callback returns an error the transaction will be rolled back.
func (c *DBClient) WithTx(fn func(tx *sql.Tx) error) error {
    tx, err := c.db.Begin()
    if err != nil {
        return err
    }

    err = fn(tx)
    if err != nil {
        _ = tx.Rollback()
        return err
    }

    return tx.Commit()
}

func (c *DBClient) WithSqlcTx(fn func(*sqlc.Queries) error) error {
    return c.WithTx(
        func(tx *sql.Tx) error {
            return fn(c.C.WithTx(tx))
        },
    )
}

// DB returns the underlying database object. Avoid whenever possible and use
// either sqlc (preferred) or sub-clients.
func (c *DBClient) DB() *sql.DB {
    return c.db
}

func (c *DBClient) Close() error {
    return c.db.Close()
}
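The transaction helpers at the bottom of db.go are the main extension point. A sketch of how WithTx might be used (the table and columns below are hypothetical and not part of this diff):

```go
package services

import (
	"context"
	"database/sql"
)

// renameUserTx is illustrative only: both statements run in one transaction,
// and any returned error causes WithTx to roll the transaction back.
func renameUserTx(ctx context.Context, db *DBClient, id int64, name string) error {
	return db.WithTx(func(tx *sql.Tx) error {
		if _, err := tx.ExecContext(ctx, `UPDATE user SET name = ? WHERE id = ?`, name, id); err != nil {
			return err
		}
		_, err := tx.ExecContext(ctx, `INSERT INTO audit_log (message) VALUES (?)`, "renamed user")
		return err
	})
}
```

WithSqlcTx wraps the same mechanism but hands the callback a *sqlc.Queries bound to the transaction, which is the route most callers should prefer.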
@@ -4,10 +4,10 @@ import (
     "errors"
     "fmt"
 
-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/pkg/log"
-
     "github.com/labstack/echo/v4"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/log"
 )
 
 type (
@@ -75,7 +75,6 @@ func (m *MailClient) send(email *mail, ctx echo.Context) error {
         Base(email.template).
         Files(fmt.Sprintf("emails/%s", email.template)).
         Execute(email.templateData)
-
     if err != nil {
         return err
     }
31 pkg/services/posts.go Normal file
@@ -0,0 +1,31 @@
package services

import (
    "database/sql"
    "fmt"

    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
)

type Post struct {
    Title string
    Body  string
}

type DBPostClient struct {
    db *sql.DB
}

// FetchAll is an mock example of fetching posts to illustrate how paging works
func (c *DBPostClient) FetchAll(pager *page.Pager) []Post {
    pager.SetItems(20)
    posts := make([]Post, 20)

    for k := range posts {
        posts[k] = Post{
            Title: fmt.Sprintf("Post example #%d", k+1),
            Body:  fmt.Sprintf("Lorem ipsum example #%d ddolor sit amet, consectetur adipiscing elit. Nam elementum vulputate tristique.", k+1),
        }
    }
    return posts[pager.GetOffset() : pager.GetOffset()+pager.ItemsPerPage]
}
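A short sketch of how the mock FetchAll above is meant to be driven; how the *page.Pager is constructed is not shown in this diff, so it is assumed to already exist:

```go
package services

import (
	"fmt"

	"git.grosinger.net/tgrosinger/saasitone/pkg/page"
)

// listPosts is illustrative only: FetchAll records the total item count on the
// pager and returns just the current page's slice of mock posts.
func listPosts(posts *DBPostClient, pager *page.Pager) {
	for _, p := range posts.FetchAll(pager) {
		fmt.Println(p.Title)
	}
}
```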
@@ -4,17 +4,17 @@ import (
     "os"
     "testing"
 
-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/ent"
-    "github.com/mikestefanello/pagoda/pkg/tests"
-
     "github.com/labstack/echo/v4"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/models/sqlc"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/tests"
 )
 
 var (
     c   *Container
     ctx echo.Context
-    usr *ent.User
+    usr *sqlc.User
 )
 
 func TestMain(m *testing.M) {
@@ -30,7 +30,7 @@ func TestMain(m *testing.M) {
 
     // Create a test user
     var err error
-    if usr, err = tests.CreateUser(c.ORM); err != nil {
+    if usr, err = tests.CreateUser(c.DB.C); err != nil {
        panic(err)
     }
 
@@ -1,179 +1,205 @@
 package services
 
 import (
-    "encoding/json"
-    "fmt"
+    "bytes"
+    "context"
+    "database/sql"
+    "encoding/gob"
+    "strings"
+    "sync"
     "time"
 
-    "github.com/hibiken/asynq"
-    "github.com/mikestefanello/pagoda/config"
+    "github.com/maragudk/goqite"
+    "github.com/maragudk/goqite/jobs"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/log"
 )
 
 type (
-    // TaskClient is that client that allows you to queue or schedule task execution
+    // TaskClient is that client that allows you to queue or schedule task execution.
+    // Under the hood we create only a single queue using goqite for all tasks because we do not want more than one
+    // runner to process the tasks. The TaskClient wrapper provides abstractions for separate, type-safe queues.
     TaskClient struct {
-        // client stores the asynq client
-        client *asynq.Client
-
-        // scheduler stores the asynq scheduler
-        scheduler *asynq.Scheduler
+        queue   *goqite.Queue
+        runner  *jobs.Runner
+        buffers sync.Pool
     }
 
-    // task handles task creation operations
-    task struct {
-        client     *TaskClient
-        typ        string
-        payload    any
-        periodic   *string
-        queue      *string
-        maxRetries *int
-        timeout    *time.Duration
-        deadline   *time.Time
-        at         *time.Time
-        wait       *time.Duration
-        retain     *time.Duration
+    // Task is a job that can be added to a queue and later passed to and executed by a QueueSubscriber.
+    // See pkg/tasks for an example of how this can be used with a queue.
+    Task interface {
+        Name() string
     }
+
+    // TaskSaveOp handles task save operations
+    TaskSaveOp struct {
+        client *TaskClient
+        task   Task
+        tx     *sql.Tx
+        at     *time.Time
+        wait   *time.Duration
+    }
+
+    // Queue is a queue that a Task can be pushed to for execution.
+    // While this can be implemented directly, it's recommended to use NewQueue() which uses generics in
+    // order to provide type-safe queues and queue subscriber callbacks for task execution.
+    Queue interface {
+        // Name returns the name of the task this queue processes
+        Name() string
+
+        // Receive receives the Task payload to be processed
+        Receive(ctx context.Context, payload []byte) error
+    }
+
+    // queue provides a type-safe implementation of Queue
+    queue[T Task] struct {
+        name       string
+        subscriber QueueSubscriber[T]
+    }
+
+    // QueueSubscriber is a generic subscriber callback for a given queue to process Tasks
+    QueueSubscriber[T Task] func(context.Context, T) error
 )
 
 // NewTaskClient creates a new task client
-func NewTaskClient(cfg *config.Config) *TaskClient {
-    // Determine the database based on the environment
-    db := cfg.Cache.Database
-    if cfg.App.Environment == config.EnvTest {
-        db = cfg.Cache.TestDatabase
+func NewTaskClient(cfg config.TasksConfig, db *sql.DB) (*TaskClient, error) {
+    // Install the schema
+    if err := goqite.Setup(context.Background(), db); err != nil {
+        // An error is returned if we already ran this and there's no better way to check.
+        // You can and probably should handle this via migrations
+        if !strings.Contains(err.Error(), "already exists") {
+            return nil, err
+        }
     }
 
-    conn := asynq.RedisClientOpt{
-        Addr:     fmt.Sprintf("%s:%d", cfg.Cache.Hostname, cfg.Cache.Port),
-        Password: cfg.Cache.Password,
-        DB:       db,
+    t := &TaskClient{
+        queue: goqite.New(goqite.NewOpts{
+            DB:         db,
+            Name:       "tasks",
+            MaxReceive: cfg.MaxRetries,
+        }),
+        buffers: sync.Pool{
+            New: func() interface{} {
+                return bytes.NewBuffer(nil)
+            },
+        },
     }
 
-    return &TaskClient{
-        client:    asynq.NewClient(conn),
-        scheduler: asynq.NewScheduler(conn, nil),
-    }
+    t.runner = jobs.NewRunner(jobs.NewRunnerOpts{
+        Limit:        cfg.Goroutines,
+        Log:          log.Default(),
+        PollInterval: cfg.PollInterval,
+        Queue:        t.queue,
+    })
+
+    return t, nil
 }
 
-// Close closes the connection to the task service
-func (t *TaskClient) Close() error {
-    return t.client.Close()
+// StartRunner starts the scheduler service which adds scheduled tasks to the queue.
+// This must be running in order to execute queued tasked.
+// To stop the runner, cancel the context.
+// This is a blocking call.
+func (t *TaskClient) StartRunner(ctx context.Context) {
+    t.runner.Start(ctx)
 }
 
-// StartScheduler starts the scheduler service which adds scheduled tasks to the queue
-// This must be running in order to queue tasks set for periodic execution
-func (t *TaskClient) StartScheduler() error {
-    return t.scheduler.Run()
+// Register registers a queue so tasks can be added to it and processed
+func (t *TaskClient) Register(queue Queue) {
+    t.runner.Register(queue.Name(), queue.Receive)
 }
 
-// New starts a task creation operation
-func (t *TaskClient) New(typ string) *task {
-    return &task{
+// New starts a task save operation
+func (t *TaskClient) New(task Task) *TaskSaveOp {
+    return &TaskSaveOp{
         client: t,
-        typ:    typ,
+        task:   task,
     }
 }
 
-// Payload sets the task payload data which will be sent to the task handler
-func (t *task) Payload(payload any) *task {
-    t.payload = payload
-    return t
-}
-
-// Periodic sets the task to execute periodically according to a given interval
-// The interval can be either in cron form ("*/5 * * * *") or "@every 30s"
-func (t *task) Periodic(interval string) *task {
-    t.periodic = &interval
-    return t
-}
-
-// Queue specifies the name of the queue to add the task to
-// The default queue will be used if this is not set
-func (t *task) Queue(queue string) *task {
-    t.queue = &queue
-    return t
-}
-
-// Timeout sets the task timeout, meaning the task must execute within a given duration
-func (t *task) Timeout(timeout time.Duration) *task {
-    t.timeout = &timeout
-    return t
-}
-
-// Deadline sets the task execution deadline to a specific date and time
-func (t *task) Deadline(deadline time.Time) *task {
-    t.deadline = &deadline
-    return t
-}
-
 // At sets the exact date and time the task should be executed
-func (t *task) At(processAt time.Time) *task {
-    t.at = &processAt
+func (t *TaskSaveOp) At(processAt time.Time) *TaskSaveOp {
+    t.Wait(time.Until(processAt))
     return t
 }
 
 // Wait instructs the task to wait a given duration before it is executed
-func (t *task) Wait(duration time.Duration) *task {
+func (t *TaskSaveOp) Wait(duration time.Duration) *TaskSaveOp {
    t.wait = &duration
    return t
 }
 
-// Retain instructs the task service to retain the task data for a given duration after execution is complete
-func (t *task) Retain(duration time.Duration) *task {
-    t.retain = &duration
+// Tx will include the task as part of a given database transaction
+func (t *TaskSaveOp) Tx(tx *sql.Tx) *TaskSaveOp {
+    t.tx = tx
     return t
 }
 
-// MaxRetries sets the maximum amount of times to retry executing the task in the event of a failure
-func (t *task) MaxRetries(retries int) *task {
-    t.maxRetries = &retries
-    return t
-}
-
-// Save saves the task so it can be executed
-func (t *task) Save() error {
-    var err error
-
-    // Build the payload
-    var payload []byte
-    if t.payload != nil {
-        if payload, err = json.Marshal(t.payload); err != nil {
-            return err
-        }
-    }
-
-    // Build the task options
-    opts := make([]asynq.Option, 0)
-    if t.queue != nil {
-        opts = append(opts, asynq.Queue(*t.queue))
-    }
-    if t.maxRetries != nil {
-        opts = append(opts, asynq.MaxRetry(*t.maxRetries))
-    }
-    if t.timeout != nil {
-        opts = append(opts, asynq.Timeout(*t.timeout))
-    }
-    if t.deadline != nil {
-        opts = append(opts, asynq.Deadline(*t.deadline))
-    }
-    if t.wait != nil {
-        opts = append(opts, asynq.ProcessIn(*t.wait))
-    }
-    if t.retain != nil {
-        opts = append(opts, asynq.Retention(*t.retain))
-    }
-    if t.at != nil {
-        opts = append(opts, asynq.ProcessAt(*t.at))
-    }
-
-    // Build the task
-    task := asynq.NewTask(t.typ, payload, opts...)
-
-    // Schedule, if needed
-    if t.periodic != nil {
-        _, err = t.client.scheduler.Register(*t.periodic, task)
-    } else {
-        _, err = t.client.client.Enqueue(task)
-    }
-
-    return err
+// Save saves the task, so it can be queued for execution
+func (t *TaskSaveOp) Save() error {
+    type message struct {
+        Name    string
+        Message []byte
+    }
+
+    // Encode the task
+    taskBuf := t.client.buffers.Get().(*bytes.Buffer)
+    if err := gob.NewEncoder(taskBuf).Encode(t.task); err != nil {
+        return err
+    }
+
+    // Wrap and encode the message
+    // This is needed as a workaround because goqite doesn't support delays using the jobs package,
+    // so we format the message the way it expects but use the queue to supply the delay
+    msgBuf := t.client.buffers.Get().(*bytes.Buffer)
+    wrapper := message{Name: t.task.Name(), Message: taskBuf.Bytes()}
+    if err := gob.NewEncoder(msgBuf).Encode(wrapper); err != nil {
+        return err
+    }
+
+    msg := goqite.Message{
+        Body: msgBuf.Bytes(),
+    }
+
+    if t.wait != nil {
+        msg.Delay = *t.wait
+    }
+
+    // Put the buffers back in the pool for re-use
+    taskBuf.Reset()
+    msgBuf.Reset()
+    t.client.buffers.Put(taskBuf)
+    t.client.buffers.Put(msgBuf)
+
+    if t.tx == nil {
+        return t.client.queue.Send(context.Background(), msg)
+    } else {
+        return t.client.queue.SendTx(context.Background(), t.tx, msg)
+    }
 }
+
+// NewQueue queues a new type-safe Queue of a given Task type
+func NewQueue[T Task](subscriber QueueSubscriber[T]) Queue {
+    var task T
+
+    q := &queue[T]{
+        name:       task.Name(),
+        subscriber: subscriber,
+    }
+
+    return q
+}
+
+func (q *queue[T]) Name() string {
+    return q.name
+}
+
+func (q *queue[T]) Receive(ctx context.Context, payload []byte) error {
+    var obj T
+    err := gob.NewDecoder(bytes.NewReader(payload)).Decode(&obj)
+    if err != nil {
+        return err
+    }
+
+    return q.subscriber(ctx, obj)
+}
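To make the new flow concrete, here is a usage sketch: a hypothetical EmailTask type, a typed queue registered for it, and a delayed save. Only Task, NewQueue, Register, New, Wait, Save, and StartRunner come from the code above; everything else is assumed:

```go
package tasks

import (
	"context"
	"time"

	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
)

// EmailTask is a hypothetical task; any type with Name() satisfies Task.
// Fields must be exported so the gob encoding used by Save can see them.
type EmailTask struct {
	To string
}

func (t EmailTask) Name() string { return "email" }

// wireUp registers a typed queue, queues one task with a delay, and then
// starts the blocking runner under a cancellable context.
func wireUp(c *services.Container) error {
	c.Tasks.Register(services.NewQueue[EmailTask](func(ctx context.Context, t EmailTask) error {
		// deliver the email here
		return nil
	}))

	if err := c.Tasks.New(EmailTask{To: "user@example.com"}).
		Wait(10 * time.Second).
		Save(); err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c.Tasks.StartRunner(ctx) // blocks until ctx is cancelled
	return nil
}
```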
@@ -1,35 +1,69 @@
 package services
 
 import (
+    "context"
+    "database/sql"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
     "testing"
     "time"
 
-    "github.com/stretchr/testify/assert"
 )
 
-func TestTaskClient_New(t *testing.T) {
-    now := time.Now()
-    tk := c.Tasks.
-        New("task1").
-        Payload("payload").
-        Queue("queue").
-        Periodic("@every 5s").
-        MaxRetries(5).
-        Timeout(5 * time.Second).
-        Deadline(now).
-        At(now).
-        Wait(6 * time.Second).
-        Retain(7 * time.Second)
-
-    assert.Equal(t, "task1", tk.typ)
-    assert.Equal(t, "payload", tk.payload.(string))
-    assert.Equal(t, "queue", *tk.queue)
-    assert.Equal(t, "@every 5s", *tk.periodic)
-    assert.Equal(t, 5, *tk.maxRetries)
-    assert.Equal(t, 5*time.Second, *tk.timeout)
-    assert.Equal(t, now, *tk.deadline)
-    assert.Equal(t, now, *tk.at)
-    assert.Equal(t, 6*time.Second, *tk.wait)
-    assert.Equal(t, 7*time.Second, *tk.retain)
-    assert.NoError(t, tk.Save())
+type testTask struct {
+    Val int
+}
+
+func (t testTask) Name() string {
+    return "test_task"
+}
+
+func TestTaskClient_New(t *testing.T) {
+    var subCalled bool
+
+    queue := NewQueue[testTask](func(ctx context.Context, task testTask) error {
+        subCalled = true
+        assert.Equal(t, 123, task.Val)
+        return nil
+    })
+    c.Tasks.Register(queue)
+
+    task := testTask{Val: 123}
+
+    tx := &sql.Tx{}
+
+    op := c.Tasks.
+        New(task).
+        Wait(5 * time.Second).
+        Tx(tx)
+
+    // Check that the task op was built correctly
+    assert.Equal(t, task, op.task)
+    assert.Equal(t, tx, op.tx)
+    assert.Equal(t, 5*time.Second, *op.wait)
+
+    // Remove the transaction and delay so we can process the task immediately
+    op.tx, op.wait = nil, nil
+    err := op.Save()
+    require.NoError(t, err)
+
+    // Start the runner
+    ctx, cancel := context.WithCancel(context.Background())
+    go c.Tasks.StartRunner(ctx)
+    defer cancel()
+
+    // Check for up to 5 seconds if the task executed
+    start := time.Now()
+waitLoop:
+    for {
+        switch {
+        case subCalled:
+            break waitLoop
+        case time.Since(start) > (5 * time.Second):
+            break waitLoop
+        default:
+            time.Sleep(10 * time.Millisecond)
+        }
+    }
+
+    assert.True(t, subCalled)
 }
@@ -9,12 +9,15 @@ import (
     "net/http"
     "sync"
 
+    "github.com/a-h/templ"
     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/pkg/context"
-    "github.com/mikestefanello/pagoda/pkg/log"
-    "github.com/mikestefanello/pagoda/pkg/page"
-    "github.com/mikestefanello/pagoda/templates"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/context"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/log"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
+    "git.grosinger.net/tgrosinger/saasitone/templ/layouts"
+    "git.grosinger.net/tgrosinger/saasitone/templates"
 )
 
 // cachedPageGroup stores the cache group for cached pages
@@ -95,6 +98,54 @@ func (t *TemplateRenderer) Parse() *templateBuilder {
     }
 }
 
+func (t *TemplateRenderer) RenderPageTempl(ctx echo.Context, page page.Page, content templ.Component) error {
+    // Page name is required
+    if page.Name == "" {
+        return echo.NewHTTPError(http.StatusInternalServerError, "page render failed due to missing name")
+    }
+
+    // Use the app name in configuration if a value was not set
+    if page.AppName == "" {
+        page.AppName = t.config.App.Name
+    }
+
+    layout := layouts.Main
+
+    // Check if this is an HTMX non-boosted request which indicates that only partial
+    // content should be rendered
+    if page.HTMX.Request.Enabled && !page.HTMX.Request.Boosted {
+        // TODO: Change layout to HTML layout
+    }
+
+    buf := bytes.Buffer{}
+    temp := layout(page, content)
+    err := temp.Render(ctx.Request().Context(), &buf)
+    if err != nil {
+        return echo.NewHTTPError(
+            http.StatusInternalServerError,
+            fmt.Sprintf("failed to parse and execute templates: %s", err),
+        )
+    }
+
+    // Set the status code
+    ctx.Response().Status = page.StatusCode
+
+    // Set any headers
+    for k, v := range page.Headers {
+        ctx.Response().Header().Set(k, v)
+    }
+
+    // Apply the HTMX response, if one
+    if page.HTMX.Response != nil {
+        page.HTMX.Response.Apply(ctx)
+    }
+
+    // Cache this page, if caching was enabled
+    t.cachePage(ctx, page, &buf)
+
+    return ctx.HTMLBlob(ctx.Response().Status, buf.Bytes())
+}
+
 // RenderPage renders a Page as an HTTP response
 func (t *TemplateRenderer) RenderPage(ctx echo.Context, page page.Page) error {
     var buf *bytes.Buffer
@@ -138,7 +189,6 @@ func (t *TemplateRenderer) RenderPage(ctx echo.Context, page page.Page) error {
         ).
         Directories("components").
         Execute(page)
-
     if err != nil {
         return echo.NewHTTPError(
             http.StatusInternalServerError,
@@ -185,7 +235,7 @@ func (t *TemplateRenderer) cachePage(ctx echo.Context, page page.Page, html *byt
     // The request URL is used as the cache key so the middleware can serve the
     // cached page on matching requests
     key := ctx.Request().URL.String()
-    cp := CachedPage{
+    cp := &CachedPage{
         URL:     key,
         HTML:    html.Bytes(),
         Headers: headers,
@@ -217,9 +267,7 @@ func (t *TemplateRenderer) GetCachedPage(ctx echo.Context, url string) (*CachedP
         Get().
         Group(cachedPageGroup).
         Key(url).
-        Type(new(CachedPage)).
         Fetch(ctx.Request().Context())
-
     if err != nil {
         return nil, err
     }
@@ -8,14 +8,14 @@ import (
     "testing"
 
     "github.com/labstack/echo/v4"
-    "github.com/mikestefanello/pagoda/config"
-    "github.com/mikestefanello/pagoda/pkg/htmx"
-    "github.com/mikestefanello/pagoda/pkg/page"
-    "github.com/mikestefanello/pagoda/pkg/tests"
-    "github.com/mikestefanello/pagoda/templates"
-
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
+
+    "git.grosinger.net/tgrosinger/saasitone/config"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/htmx"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/page"
+    "git.grosinger.net/tgrosinger/saasitone/pkg/tests"
+    "git.grosinger.net/tgrosinger/saasitone/templates"
 )
 
 func TestTemplateRenderer(t *testing.T) {
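A sketch of how a handler might call the new RenderPageTempl method. The handler, the pages.Home templ component, and the page.New constructor are assumptions for illustration; only RenderPageTempl itself comes from this diff:

```go
package handlers

import (
	"net/http"

	"github.com/labstack/echo/v4"

	"git.grosinger.net/tgrosinger/saasitone/pkg/page"
	"git.grosinger.net/tgrosinger/saasitone/pkg/services"
	"git.grosinger.net/tgrosinger/saasitone/templ/pages" // hypothetical templ package
)

// home builds a Page for the request and hands a templ component to the
// renderer, which wraps it in the main layout, applies HTMX headers, caches
// the result if enabled, and writes the HTML response.
func home(tr *services.TemplateRenderer) echo.HandlerFunc {
	return func(ctx echo.Context) error {
		p := page.New(ctx) // assumed constructor for page.Page
		p.Name = "home"
		p.StatusCode = http.StatusOK

		return tr.RenderPageTempl(ctx, p, pages.Home(p))
	}
}
```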
Some files were not shown because too many files have changed in this diff.