feat: add worker distributor and model registry
All checks were successful
continuous-integration/drone/push Build is passing
internal/workscheduler/repositories/queries.sql.go (new file, +129)
@@ -0,0 +1,129 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
//   sqlc v1.23.0
// source: queries.sql

package repositories

import (
    "context"

    "github.com/google/uuid"
    "github.com/jackc/pgx/v5/pgtype"
)

const archive = `-- name: Archive :exec
UPDATE work_schedule
SET
  state = 'archived'
WHERE
  schedule_id = $1
`

func (q *Queries) Archive(ctx context.Context, scheduleID uuid.UUID) error {
    _, err := q.db.Exec(ctx, archive, scheduleID)
    return err
}

const getCurrentQueueSize = `-- name: GetCurrentQueueSize :one
SELECT
  COUNT(*) current_queue_size
FROM
  work_schedule
WHERE
  worker_id = $1
  AND state <> 'archived'
`

func (q *Queries) GetCurrentQueueSize(ctx context.Context, workerID uuid.UUID) (int64, error) {
    row := q.db.QueryRow(ctx, getCurrentQueueSize, workerID)
    var current_queue_size int64
    err := row.Scan(&current_queue_size)
    return current_queue_size, err
}

const getNext = `-- name: GetNext :one
SELECT
  schedule_id, worker_id, start_run, end_run, updated_at, state
FROM
  work_schedule
WHERE
  worker_id = $1
  AND state = 'pending'
ORDER BY updated_at DESC
LIMIT 1
`

func (q *Queries) GetNext(ctx context.Context, workerID uuid.UUID) (*WorkSchedule, error) {
    row := q.db.QueryRow(ctx, getNext, workerID)
    var i WorkSchedule
    err := row.Scan(
        &i.ScheduleID,
        &i.WorkerID,
        &i.StartRun,
        &i.EndRun,
        &i.UpdatedAt,
        &i.State,
    )
    return &i, err
}

const insertQueueItem = `-- name: InsertQueueItem :exec
INSERT INTO work_schedule
  (
    schedule_id
    , worker_id
    , start_run
    , end_run
    , state
  )
VALUES
  (
    $1
    , $2
    , $3
    , $4
    , 'pending'
  )
`

type InsertQueueItemParams struct {
    ScheduleID uuid.UUID          `json:"schedule_id"`
    WorkerID   uuid.UUID          `json:"worker_id"`
    StartRun   pgtype.Timestamptz `json:"start_run"`
    EndRun     pgtype.Timestamptz `json:"end_run"`
}

func (q *Queries) InsertQueueItem(ctx context.Context, arg *InsertQueueItemParams) error {
    _, err := q.db.Exec(ctx, insertQueueItem,
        arg.ScheduleID,
        arg.WorkerID,
        arg.StartRun,
        arg.EndRun,
    )
    return err
}

const ping = `-- name: Ping :one
SELECT 1
`

func (q *Queries) Ping(ctx context.Context) (int32, error) {
    row := q.db.QueryRow(ctx, ping)
    var column_1 int32
    err := row.Scan(&column_1)
    return column_1, err
}

const startProcessing = `-- name: StartProcessing :exec
UPDATE work_schedule
SET
  state = 'processing'
WHERE
  schedule_id = $1
`

func (q *Queries) StartProcessing(ctx context.Context, scheduleID uuid.UUID) error {
    _, err := q.db.Exec(ctx, startProcessing, scheduleID)
    return err
}
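For context, a minimal sketch of how these generated queries could be wired into the scheduler's queue lifecycle (insert a pending item, fetch the next one, mark it processing, archive it). It assumes the usual sqlc-generated New(DBTX) constructor from this package's db.go and a pgxpool connection string, neither of which appears in this diff; the module import path is a placeholder.

```go
package main

import (
    "context"
    "log"
    "time"

    "github.com/google/uuid"
    "github.com/jackc/pgx/v5/pgtype"
    "github.com/jackc/pgx/v5/pgxpool"

    // Hypothetical module path; replace with this repository's real module.
    "example.com/project/internal/workscheduler/repositories"
)

func main() {
    ctx := context.Background()

    // Connection string is illustrative only.
    pool, err := pgxpool.New(ctx, "postgres://localhost:5432/workscheduler")
    if err != nil {
        log.Fatal(err)
    }
    defer pool.Close()

    // Assumes sqlc's standard generated constructor New(DBTX) *Queries.
    q := repositories.New(pool)

    workerID := uuid.New()

    // Enqueue a schedule entry; the query fixes state to 'pending'.
    err = q.InsertQueueItem(ctx, &repositories.InsertQueueItemParams{
        ScheduleID: uuid.New(),
        WorkerID:   workerID,
        StartRun:   pgtype.Timestamptz{Time: time.Now(), Valid: true},
        EndRun:     pgtype.Timestamptz{Time: time.Now().Add(time.Hour), Valid: true},
    })
    if err != nil {
        log.Fatal(err)
    }

    // Fetch the most recently updated pending item for this worker,
    // move it to 'processing', and archive it when the work is done.
    next, err := q.GetNext(ctx, workerID)
    if err != nil {
        log.Fatal(err)
    }
    if err := q.StartProcessing(ctx, next.ScheduleID); err != nil {
        log.Fatal(err)
    }
    if err := q.Archive(ctx, next.ScheduleID); err != nil {
        log.Fatal(err)
    }

    // Count items that are not yet archived for this worker.
    size, err := q.GetCurrentQueueSize(ctx, workerID)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("remaining queue size for worker %s: %d", workerID, size)
}
```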