mirror of
https://github.com/gabehf/Koito.git
synced 2026-03-07 21:48:18 -08:00
feat: interest over time graph (#127)
* api * ui * test * add margin to prevent clipping
This commit is contained in:
parent
e45099c71a
commit
231eb1b0fb
16 changed files with 1097 additions and 4 deletions
270
internal/repository/interest.sql.go
Normal file
270
internal/repository/interest.sql.go
Normal file
|
|
@ -0,0 +1,270 @@
|
|||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.30.0
|
||||
// source: interest.sql
|
||||
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// getGroupedListensFromArtist splits an artist's entire listen history into
// $2 equal-width time buckets between the first and last listen, returning
// each non-empty bucket's start, end, and listen count.
//   $1 = artist id, $2 = bucket count.
// NULLIF(..., 0) guards against a zero-length time range (a single listen or
// all listens at the same instant), yielding NULL bucket_idx instead of a
// division-by-zero error. LEAST($2 - 1, ...) clamps the maximum listen
// (whose fraction is exactly 1.0) into the last bucket rather than creating
// an extra bucket $2. Buckets with no listens produce no row.
const getGroupedListensFromArtist = `-- name: GetGroupedListensFromArtist :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    JOIN artist_tracks at ON at.track_id = t.id
    WHERE at.artist_id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`
|
||||
|
||||
// GetGroupedListensFromArtistParams holds the arguments for
// GetGroupedListensFromArtist.
type GetGroupedListensFromArtistParams struct {
	// ArtistID is the artist whose listen history is aggregated ($1).
	ArtistID int32
	// BucketCount is the number of time buckets to divide the history
	// into ($2). interface{} because sqlc could not infer a concrete
	// type for the parameter; callers are expected to pass an integer.
	BucketCount interface{}
}
|
||||
|
||||
// GetGroupedListensFromArtistRow is one time bucket of an artist's
// interest-over-time graph: the bucket's time span and how many listens
// fell inside it.
type GetGroupedListensFromArtistRow struct {
	// BucketStart is the inclusive start of the bucket's time span.
	BucketStart time.Time
	// BucketEnd is the end of the bucket's time span.
	BucketEnd time.Time
	// ListenCount is the number of listens that fall in this bucket.
	ListenCount int64
}
|
||||
|
||||
func (q *Queries) GetGroupedListensFromArtist(ctx context.Context, arg GetGroupedListensFromArtistParams) ([]GetGroupedListensFromArtistRow, error) {
|
||||
rows, err := q.db.Query(ctx, getGroupedListensFromArtist, arg.ArtistID, arg.BucketCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetGroupedListensFromArtistRow
|
||||
for rows.Next() {
|
||||
var i GetGroupedListensFromArtistRow
|
||||
if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// getGroupedListensFromRelease splits a release's entire listen history into
// $2 equal-width time buckets between the first and last listen, returning
// each non-empty bucket's start, end, and listen count.
//   $1 = release id, $2 = bucket count.
// Same bucketing math as getGroupedListensFromArtist: NULLIF guards a
// zero-length range and LEAST clamps the final listen into bucket $2 - 1.
// NOTE(review): the first CTE is still named artist_listens — presumably
// copied from the artist query in interest.sql; harmless but misleading.
const getGroupedListensFromRelease = `-- name: GetGroupedListensFromRelease :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.release_id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`
|
||||
|
||||
// GetGroupedListensFromReleaseParams holds the arguments for
// GetGroupedListensFromRelease.
type GetGroupedListensFromReleaseParams struct {
	// ReleaseID is the release whose listen history is aggregated ($1).
	ReleaseID int32
	// BucketCount is the number of time buckets to divide the history
	// into ($2). interface{} because sqlc could not infer a concrete
	// type for the parameter; callers are expected to pass an integer.
	BucketCount interface{}
}
|
||||
|
||||
// GetGroupedListensFromReleaseRow is one time bucket of a release's
// interest-over-time graph: the bucket's time span and how many listens
// fell inside it.
type GetGroupedListensFromReleaseRow struct {
	// BucketStart is the inclusive start of the bucket's time span.
	BucketStart time.Time
	// BucketEnd is the end of the bucket's time span.
	BucketEnd time.Time
	// ListenCount is the number of listens that fall in this bucket.
	ListenCount int64
}
|
||||
|
||||
func (q *Queries) GetGroupedListensFromRelease(ctx context.Context, arg GetGroupedListensFromReleaseParams) ([]GetGroupedListensFromReleaseRow, error) {
|
||||
rows, err := q.db.Query(ctx, getGroupedListensFromRelease, arg.ReleaseID, arg.BucketCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetGroupedListensFromReleaseRow
|
||||
for rows.Next() {
|
||||
var i GetGroupedListensFromReleaseRow
|
||||
if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// getGroupedListensFromTrack splits a track's entire listen history into
// $2 equal-width time buckets between the first and last listen, returning
// each non-empty bucket's start, end, and listen count.
//   $1 = track id, $2 = bucket count.
// Same bucketing math as getGroupedListensFromArtist: NULLIF guards a
// zero-length range and LEAST clamps the final listen into bucket $2 - 1.
// NOTE(review): the first CTE is still named artist_listens — presumably
// copied from the artist query in interest.sql; harmless but misleading.
const getGroupedListensFromTrack = `-- name: GetGroupedListensFromTrack :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`
|
||||
|
||||
// GetGroupedListensFromTrackParams holds the arguments for
// GetGroupedListensFromTrack.
type GetGroupedListensFromTrackParams struct {
	// ID is the track whose listen history is aggregated ($1).
	ID int32
	// BucketCount is the number of time buckets to divide the history
	// into ($2). interface{} because sqlc could not infer a concrete
	// type for the parameter; callers are expected to pass an integer.
	BucketCount interface{}
}
|
||||
|
||||
// GetGroupedListensFromTrackRow is one time bucket of a track's
// interest-over-time graph: the bucket's time span and how many listens
// fell inside it.
type GetGroupedListensFromTrackRow struct {
	// BucketStart is the inclusive start of the bucket's time span.
	BucketStart time.Time
	// BucketEnd is the end of the bucket's time span.
	BucketEnd time.Time
	// ListenCount is the number of listens that fall in this bucket.
	ListenCount int64
}
|
||||
|
||||
func (q *Queries) GetGroupedListensFromTrack(ctx context.Context, arg GetGroupedListensFromTrackParams) ([]GetGroupedListensFromTrackRow, error) {
|
||||
rows, err := q.db.Query(ctx, getGroupedListensFromTrack, arg.ID, arg.BucketCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetGroupedListensFromTrackRow
|
||||
for rows.Next() {
|
||||
var i GetGroupedListensFromTrackRow
|
||||
if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue