mirror of https://github.com/gabehf/Koito.git
synced 2026-03-07 21:48:18 -08:00
247 lines · 6.6 KiB · Go
// Code generated by sqlc. DO NOT EDIT.
|
|
// versions:
|
|
// sqlc v1.30.0
|
|
// source: interest.sql
|
|
|
|
package repository
|
|
|
|
import (
|
|
"context"
|
|
"time"
|
|
)
|
|
|
|
// getGroupedListensFromArtist is the SQL behind GetGroupedListensFromArtist.
// It divides the span from the artist's earliest listen (MIN(l.listened_at))
// up to NOW() into $2 equal-width buckets, maps each of the artist's listens
// to a bucket index, and returns one row per bucket with its start/end
// timestamps and listen count. The LEFT JOIN keeps buckets with no listens,
// and COUNT(li.bucket_idx) counts only non-NULL matches, so empty buckets
// report 0. LEAST($2 - 1, ...) clamps a listen at exactly NOW() into the
// final bucket; NULLIF(total_seconds, 0) avoids division by zero when the
// whole history is a single instant.
// Parameters: $1 = artist id, $2 = bucket count.
// NOTE(review): generated by sqlc — change interest.sql and regenerate
// rather than editing this string by hand.
const getGroupedListensFromArtist = `-- name: GetGroupedListensFromArtist :many
WITH bounds AS (
    SELECT
        MIN(l.listened_at) AS start_time,
        NOW() AS end_time
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    JOIN artist_tracks at ON at.track_id = t.id
    WHERE at.artist_id = $1
),
stats AS (
    SELECT
        start_time,
        end_time,
        EXTRACT(EPOCH FROM (end_time - start_time)) AS total_seconds,
        ((end_time - start_time) / $2::int) AS bucket_interval
    FROM bounds
),
bucket_series AS (
    SELECT generate_series(0, $2::int - 1) AS idx
),
listen_indices AS (
    SELECT
        LEAST(
            $2::int - 1,
            FLOOR(
                (EXTRACT(EPOCH FROM (l.listened_at - s.start_time)) / NULLIF(s.total_seconds, 0))
                * $2::int
            )::int
        ) AS bucket_idx
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    JOIN artist_tracks at ON at.track_id = t.id
    CROSS JOIN stats s
    WHERE at.artist_id = $1
    AND s.start_time IS NOT NULL
)
SELECT
    (s.start_time + (s.bucket_interval * bs.idx))::timestamptz AS bucket_start,
    (s.start_time + (s.bucket_interval * (bs.idx + 1)))::timestamptz AS bucket_end,
    COUNT(li.bucket_idx) AS listen_count
FROM bucket_series bs
CROSS JOIN stats s
LEFT JOIN listen_indices li ON bs.idx = li.bucket_idx
WHERE s.start_time IS NOT NULL
GROUP BY bs.idx, s.start_time, s.bucket_interval
ORDER BY bs.idx
`
|
|
|
|
// GetGroupedListensFromArtistParams holds the inputs for
// GetGroupedListensFromArtist.
type GetGroupedListensFromArtistParams struct {
	// ArtistID selects whose listens are bucketed ($1 in the query).
	ArtistID int32
	// BucketCount is the number of equal-width time buckets to produce ($2).
	BucketCount int32
}
|
|
|
|
// GetGroupedListensFromArtistRow is one time bucket returned by
// GetGroupedListensFromArtist.
type GetGroupedListensFromArtistRow struct {
	// BucketStart is the inclusive start of the bucket.
	BucketStart time.Time
	// BucketEnd is the end of the bucket (start of the next bucket).
	BucketEnd time.Time
	// ListenCount is the number of listens that fell into this bucket;
	// 0 for buckets with no activity.
	ListenCount int64
}
|
|
|
|
func (q *Queries) GetGroupedListensFromArtist(ctx context.Context, arg GetGroupedListensFromArtistParams) ([]GetGroupedListensFromArtistRow, error) {
|
|
rows, err := q.db.Query(ctx, getGroupedListensFromArtist, arg.ArtistID, arg.BucketCount)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetGroupedListensFromArtistRow
|
|
for rows.Next() {
|
|
var i GetGroupedListensFromArtistRow
|
|
if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
// getGroupedListensFromRelease is the SQL behind GetGroupedListensFromRelease.
// Identical in structure to getGroupedListensFromArtist, but scoped to a
// single release via t.release_id: it splits the span from the release's
// first listen to NOW() into $2 equal buckets and returns per-bucket listen
// counts, keeping empty buckets at 0 via the LEFT JOIN + COUNT of non-NULL
// bucket indices.
// Parameters: $1 = release id, $2 = bucket count.
// NOTE(review): generated by sqlc — change interest.sql and regenerate
// rather than editing this string by hand.
const getGroupedListensFromRelease = `-- name: GetGroupedListensFromRelease :many
WITH bounds AS (
    SELECT
        MIN(l.listened_at) AS start_time,
        NOW() AS end_time
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.release_id = $1
),
stats AS (
    SELECT
        start_time,
        end_time,
        EXTRACT(EPOCH FROM (end_time - start_time)) AS total_seconds,
        ((end_time - start_time) / $2::int) AS bucket_interval
    FROM bounds
),
bucket_series AS (
    SELECT generate_series(0, $2::int - 1) AS idx
),
listen_indices AS (
    SELECT
        LEAST(
            $2::int - 1,
            FLOOR(
                (EXTRACT(EPOCH FROM (l.listened_at - s.start_time)) / NULLIF(s.total_seconds, 0))
                * $2::int
            )::int
        ) AS bucket_idx
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    CROSS JOIN stats s
    WHERE t.release_id = $1
    AND s.start_time IS NOT NULL
)
SELECT
    (s.start_time + (s.bucket_interval * bs.idx))::timestamptz AS bucket_start,
    (s.start_time + (s.bucket_interval * (bs.idx + 1)))::timestamptz AS bucket_end,
    COUNT(li.bucket_idx) AS listen_count
FROM bucket_series bs
CROSS JOIN stats s
LEFT JOIN listen_indices li ON bs.idx = li.bucket_idx
WHERE s.start_time IS NOT NULL
GROUP BY bs.idx, s.start_time, s.bucket_interval
ORDER BY bs.idx
`
|
|
|
|
// GetGroupedListensFromReleaseParams holds the inputs for
// GetGroupedListensFromRelease.
type GetGroupedListensFromReleaseParams struct {
	// ReleaseID selects the release whose listens are bucketed ($1).
	ReleaseID int32
	// BucketCount is the number of equal-width time buckets to produce ($2).
	BucketCount int32
}
|
|
|
|
// GetGroupedListensFromReleaseRow is one time bucket returned by
// GetGroupedListensFromRelease.
type GetGroupedListensFromReleaseRow struct {
	// BucketStart is the inclusive start of the bucket.
	BucketStart time.Time
	// BucketEnd is the end of the bucket (start of the next bucket).
	BucketEnd time.Time
	// ListenCount is the number of listens that fell into this bucket;
	// 0 for buckets with no activity.
	ListenCount int64
}
|
|
|
|
func (q *Queries) GetGroupedListensFromRelease(ctx context.Context, arg GetGroupedListensFromReleaseParams) ([]GetGroupedListensFromReleaseRow, error) {
|
|
rows, err := q.db.Query(ctx, getGroupedListensFromRelease, arg.ReleaseID, arg.BucketCount)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetGroupedListensFromReleaseRow
|
|
for rows.Next() {
|
|
var i GetGroupedListensFromReleaseRow
|
|
if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
// getGroupedListensFromTrack is the SQL behind GetGroupedListensFromTrack.
// Identical in structure to getGroupedListensFromArtist, but scoped to one
// track via t.id: it splits the span from the track's first listen to NOW()
// into $2 equal buckets and returns per-bucket listen counts, keeping empty
// buckets at 0 via the LEFT JOIN + COUNT of non-NULL bucket indices.
// Parameters: $1 = track id, $2 = bucket count.
// NOTE(review): generated by sqlc — change interest.sql and regenerate
// rather than editing this string by hand.
const getGroupedListensFromTrack = `-- name: GetGroupedListensFromTrack :many
WITH bounds AS (
    SELECT
        MIN(l.listened_at) AS start_time,
        NOW() AS end_time
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.id = $1
),
stats AS (
    SELECT
        start_time,
        end_time,
        EXTRACT(EPOCH FROM (end_time - start_time)) AS total_seconds,
        ((end_time - start_time) / $2::int) AS bucket_interval
    FROM bounds
),
bucket_series AS (
    SELECT generate_series(0, $2::int - 1) AS idx
),
listen_indices AS (
    SELECT
        LEAST(
            $2::int - 1,
            FLOOR(
                (EXTRACT(EPOCH FROM (l.listened_at - s.start_time)) / NULLIF(s.total_seconds, 0))
                * $2::int
            )::int
        ) AS bucket_idx
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    CROSS JOIN stats s
    WHERE t.id = $1
    AND s.start_time IS NOT NULL
)
SELECT
    (s.start_time + (s.bucket_interval * bs.idx))::timestamptz AS bucket_start,
    (s.start_time + (s.bucket_interval * (bs.idx + 1)))::timestamptz AS bucket_end,
    COUNT(li.bucket_idx) AS listen_count
FROM bucket_series bs
CROSS JOIN stats s
LEFT JOIN listen_indices li ON bs.idx = li.bucket_idx
WHERE s.start_time IS NOT NULL
GROUP BY bs.idx, s.start_time, s.bucket_interval
ORDER BY bs.idx
`
|
|
|
|
// GetGroupedListensFromTrackParams holds the inputs for
// GetGroupedListensFromTrack.
type GetGroupedListensFromTrackParams struct {
	// ID selects the track whose listens are bucketed ($1 in the query).
	ID int32
	// BucketCount is the number of equal-width time buckets to produce ($2).
	BucketCount int32
}
|
|
|
|
// GetGroupedListensFromTrackRow is one time bucket returned by
// GetGroupedListensFromTrack.
type GetGroupedListensFromTrackRow struct {
	// BucketStart is the inclusive start of the bucket.
	BucketStart time.Time
	// BucketEnd is the end of the bucket (start of the next bucket).
	BucketEnd time.Time
	// ListenCount is the number of listens that fell into this bucket;
	// 0 for buckets with no activity.
	ListenCount int64
}
|
|
|
|
func (q *Queries) GetGroupedListensFromTrack(ctx context.Context, arg GetGroupedListensFromTrackParams) ([]GetGroupedListensFromTrackRow, error) {
|
|
rows, err := q.db.Query(ctx, getGroupedListensFromTrack, arg.ID, arg.BucketCount)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetGroupedListensFromTrackRow
|
|
for rows.Next() {
|
|
var i GetGroupedListensFromTrackRow
|
|
if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|