mirror of
https://github.com/gabehf/Koito.git
synced 2026-03-07 13:38:15 -08:00
feat: interest over time graph (#127)
* api * ui * test * add margin to prevent clipping
This commit is contained in:
parent
e45099c71a
commit
231eb1b0fb
16 changed files with 1097 additions and 4 deletions
|
|
@ -30,6 +30,7 @@ type DB interface {
|
|||
GetUserBySession(ctx context.Context, sessionId uuid.UUID) (*models.User, error)
|
||||
GetUserByUsername(ctx context.Context, username string) (*models.User, error)
|
||||
GetUserByApiKey(ctx context.Context, key string) (*models.User, error)
|
||||
GetInterest(ctx context.Context, opts GetInterestOpts) ([]InterestBucket, error)
|
||||
|
||||
// Save
|
||||
|
||||
|
|
|
|||
|
|
@ -153,3 +153,10 @@ type GetExportPageOpts struct {
|
|||
TrackID int32
|
||||
Limit int32
|
||||
}
|
||||
|
||||
// GetInterestOpts selects the subject and the resolution for DB.GetInterest.
// Buckets is required. Exactly one of ArtistID, AlbumID, or TrackID should be
// set; the psql implementation checks them in artist, album, track order.
type GetInterestOpts struct {
	Buckets  int   // number of equal time buckets to split the listen history into; required
	AlbumID  int32 // aggregate listens for every track on this release
	ArtistID int32 // aggregate listens for every track by this artist
	TrackID  int32 // aggregate listens for this single track
}
|
||||
|
|
|
|||
70
internal/db/psql/interest.go
Normal file
70
internal/db/psql/interest.go
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
package psql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/gabehf/koito/internal/db"
|
||||
"github.com/gabehf/koito/internal/repository"
|
||||
)
|
||||
|
||||
func (d *Psql) GetInterest(ctx context.Context, opts db.GetInterestOpts) ([]db.InterestBucket, error) {
|
||||
if opts.Buckets == 0 {
|
||||
return nil, errors.New("GetInterest: bucket count must be provided")
|
||||
}
|
||||
|
||||
ret := make([]db.InterestBucket, opts.Buckets)
|
||||
|
||||
if opts.ArtistID != 0 {
|
||||
resp, err := d.q.GetGroupedListensFromArtist(ctx, repository.GetGroupedListensFromArtistParams{
|
||||
ArtistID: opts.ArtistID,
|
||||
BucketCount: opts.Buckets,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetInterest: GetGroupedListensFromArtist: %w", err)
|
||||
}
|
||||
for i, v := range resp {
|
||||
ret[i] = db.InterestBucket{
|
||||
BucketStart: v.BucketStart,
|
||||
BucketEnd: v.BucketEnd,
|
||||
ListenCount: v.ListenCount,
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
} else if opts.AlbumID != 0 {
|
||||
resp, err := d.q.GetGroupedListensFromRelease(ctx, repository.GetGroupedListensFromReleaseParams{
|
||||
ReleaseID: opts.AlbumID,
|
||||
BucketCount: opts.Buckets,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetInterest: GetGroupedListensFromRelease: %w", err)
|
||||
}
|
||||
for i, v := range resp {
|
||||
ret[i] = db.InterestBucket{
|
||||
BucketStart: v.BucketStart,
|
||||
BucketEnd: v.BucketEnd,
|
||||
ListenCount: v.ListenCount,
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
} else if opts.TrackID != 0 {
|
||||
resp, err := d.q.GetGroupedListensFromTrack(ctx, repository.GetGroupedListensFromTrackParams{
|
||||
ID: opts.TrackID,
|
||||
BucketCount: opts.Buckets,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetInterest: GetGroupedListensFromTrack: %w", err)
|
||||
}
|
||||
for i, v := range resp {
|
||||
ret[i] = db.InterestBucket{
|
||||
BucketStart: v.BucketStart,
|
||||
BucketEnd: v.BucketEnd,
|
||||
ListenCount: v.ListenCount,
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
} else {
|
||||
return nil, errors.New("GetInterest: artist id, album id, or track id must be provided")
|
||||
}
|
||||
}
|
||||
112
internal/db/psql/interest_test.go
Normal file
112
internal/db/psql/interest_test.go
Normal file
|
|
@ -0,0 +1,112 @@
|
|||
package psql_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/gabehf/koito/internal/db"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// an llm wrote this because i didn't feel like it. it looks like it works, although
|
||||
// it could stand to be more thorough
|
||||
func TestGetInterest(t *testing.T) {
|
||||
truncateTestData(t)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// --- Setup Data ---
|
||||
|
||||
// Insert Artists
|
||||
err := store.Exec(ctx, `
|
||||
INSERT INTO artists (musicbrainz_id)
|
||||
VALUES ('00000000-0000-0000-0000-000000000001'),
|
||||
('00000000-0000-0000-0000-000000000002')`)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Insert Releases (Albums)
|
||||
err = store.Exec(ctx, `
|
||||
INSERT INTO releases (musicbrainz_id)
|
||||
VALUES ('00000000-0000-0000-0000-000000000011')`)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Insert Tracks (Both on Release 1)
|
||||
err = store.Exec(ctx, `
|
||||
INSERT INTO tracks (musicbrainz_id, release_id)
|
||||
VALUES ('11111111-1111-1111-1111-111111111111', 1),
|
||||
('22222222-2222-2222-2222-222222222222', 1)`)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Link Artists to Tracks
|
||||
// Artist 1 -> Track 1
|
||||
// Artist 2 -> Track 2
|
||||
err = store.Exec(ctx, `
|
||||
INSERT INTO artist_tracks (artist_id, track_id)
|
||||
VALUES (1, 1), (2, 2)`)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Insert Listens
|
||||
// Track 1 (Artist 1, Release 1): 3 Listens
|
||||
// Track 2 (Artist 2, Release 1): 2 Listens
|
||||
err = store.Exec(ctx, `
|
||||
INSERT INTO listens (user_id, track_id, listened_at) VALUES
|
||||
(1, 1, NOW() - INTERVAL '1 hour'),
|
||||
(1, 1, NOW() - INTERVAL '2 hours'),
|
||||
(1, 1, NOW() - INTERVAL '3 hours'),
|
||||
(1, 2, NOW() - INTERVAL '1 hour'),
|
||||
(1, 2, NOW() - INTERVAL '2 hours')
|
||||
`)
|
||||
require.NoError(t, err)
|
||||
|
||||
// --- Test Validation ---
|
||||
|
||||
t.Run("Validation", func(t *testing.T) {
|
||||
// Error: Missing Buckets
|
||||
_, err := store.GetInterest(ctx, db.GetInterestOpts{ArtistID: 1})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "bucket count must be provided")
|
||||
|
||||
// Error: Missing ID
|
||||
_, err = store.GetInterest(ctx, db.GetInterestOpts{Buckets: 10})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "must be provided")
|
||||
})
|
||||
|
||||
// --- Test Data Retrieval ---
|
||||
// Note: We use Buckets: 1 to ensure all listens are aggregated into a single result
|
||||
// for easier assertion, avoiding complex date/time math in the test.
|
||||
|
||||
t.Run("Artist Interest", func(t *testing.T) {
|
||||
// Artist 1 should have 3 listens (from Track 1)
|
||||
buckets, err := store.GetInterest(ctx, db.GetInterestOpts{
|
||||
ArtistID: 1,
|
||||
Buckets: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, buckets, 1)
|
||||
assert.EqualValues(t, 3, buckets[0].ListenCount, "Artist 1 should have 3 listens")
|
||||
})
|
||||
|
||||
t.Run("Album Interest", func(t *testing.T) {
|
||||
// Album 1 contains Track 1 (3 listens) and Track 2 (2 listens) = 5 Total
|
||||
buckets, err := store.GetInterest(ctx, db.GetInterestOpts{
|
||||
AlbumID: 1,
|
||||
Buckets: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, buckets, 1)
|
||||
assert.EqualValues(t, 5, buckets[0].ListenCount, "Album 1 should have 5 listens total")
|
||||
})
|
||||
|
||||
t.Run("Track Interest", func(t *testing.T) {
|
||||
// Track 2 should have 2 listens
|
||||
buckets, err := store.GetInterest(ctx, db.GetInterestOpts{
|
||||
TrackID: 2,
|
||||
Buckets: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, buckets, 1)
|
||||
assert.EqualValues(t, 2, buckets[0].ListenCount, "Track 2 should have 2 listens")
|
||||
})
|
||||
}
|
||||
|
|
@ -44,3 +44,9 @@ type ExportItem struct {
|
|||
ReleaseAliases []models.Alias
|
||||
Artists []models.ArtistWithFullAliases
|
||||
}
|
||||
|
||||
// InterestBucket is one time slice of listen activity, as returned by
// DB.GetInterest; a sequence of these backs the interest-over-time graph.
type InterestBucket struct {
	BucketStart time.Time `json:"bucket_start"` // start of the slice
	BucketEnd   time.Time `json:"bucket_end"`   // end of the slice
	ListenCount int64     `json:"listen_count"` // listens recorded within the slice
}
|
||||
|
|
|
|||
270
internal/repository/interest.sql.go
Normal file
270
internal/repository/interest.sql.go
Normal file
|
|
@ -0,0 +1,270 @@
|
|||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.30.0
|
||||
// source: interest.sql
|
||||
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// getGroupedListensFromArtist splits the span between an artist's first and
// last listen into $2 equal buckets and returns, for each bucket that has at
// least one listen, the bucket's start/end timestamps and its listen count,
// ordered by bucket start. LEAST($2 - 1, ...) clamps the final listen into
// the last bucket; NULLIF guards the division when all listens share a single
// timestamp. NOTE: sqlc-generated — edit interest.sql, not this file.
const getGroupedListensFromArtist = `-- name: GetGroupedListensFromArtist :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    JOIN artist_tracks at ON at.track_id = t.id
    WHERE at.artist_id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`

// GetGroupedListensFromArtistParams are the bind parameters for
// GetGroupedListensFromArtist.
type GetGroupedListensFromArtistParams struct {
	ArtistID    int32
	BucketCount interface{} // $2, the bucket count; untyped because sqlc could not infer a type for it
}

// GetGroupedListensFromArtistRow is one non-empty time bucket of the
// artist's listen history.
type GetGroupedListensFromArtistRow struct {
	BucketStart time.Time
	BucketEnd   time.Time
	ListenCount int64
}

// GetGroupedListensFromArtist executes getGroupedListensFromArtist and
// collects every row; rows arrive ordered by bucket start.
func (q *Queries) GetGroupedListensFromArtist(ctx context.Context, arg GetGroupedListensFromArtistParams) ([]GetGroupedListensFromArtistRow, error) {
	rows, err := q.db.Query(ctx, getGroupedListensFromArtist, arg.ArtistID, arg.BucketCount)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupedListensFromArtistRow
	for rows.Next() {
		var i GetGroupedListensFromArtistRow
		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Surface any error the iteration itself hit (distinct from Scan errors).
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
// getGroupedListensFromRelease: same bucketing pipeline as
// getGroupedListensFromArtist, but the source CTE gathers listens for every
// track on release $1. (The CTE name artist_listens is a leftover from the
// artist query.) NOTE: sqlc-generated — edit interest.sql, not this file.
const getGroupedListensFromRelease = `-- name: GetGroupedListensFromRelease :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.release_id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`

// GetGroupedListensFromReleaseParams are the bind parameters for
// GetGroupedListensFromRelease.
type GetGroupedListensFromReleaseParams struct {
	ReleaseID   int32
	BucketCount interface{} // $2, the bucket count; untyped because sqlc could not infer a type for it
}

// GetGroupedListensFromReleaseRow is one non-empty time bucket of the
// release's listen history.
type GetGroupedListensFromReleaseRow struct {
	BucketStart time.Time
	BucketEnd   time.Time
	ListenCount int64
}

// GetGroupedListensFromRelease executes getGroupedListensFromRelease and
// collects every row; rows arrive ordered by bucket start.
func (q *Queries) GetGroupedListensFromRelease(ctx context.Context, arg GetGroupedListensFromReleaseParams) ([]GetGroupedListensFromReleaseRow, error) {
	rows, err := q.db.Query(ctx, getGroupedListensFromRelease, arg.ReleaseID, arg.BucketCount)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupedListensFromReleaseRow
	for rows.Next() {
		var i GetGroupedListensFromReleaseRow
		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Surface any error the iteration itself hit (distinct from Scan errors).
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
// getGroupedListensFromTrack: same bucketing pipeline as
// getGroupedListensFromArtist, but the source CTE gathers listens for the
// single track $1. (The CTE name artist_listens is a leftover from the
// artist query.) NOTE: sqlc-generated — edit interest.sql, not this file.
const getGroupedListensFromTrack = `-- name: GetGroupedListensFromTrack :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`

// GetGroupedListensFromTrackParams are the bind parameters for
// GetGroupedListensFromTrack.
type GetGroupedListensFromTrackParams struct {
	ID          int32
	BucketCount interface{} // $2, the bucket count; untyped because sqlc could not infer a type for it
}

// GetGroupedListensFromTrackRow is one non-empty time bucket of the
// track's listen history.
type GetGroupedListensFromTrackRow struct {
	BucketStart time.Time
	BucketEnd   time.Time
	ListenCount int64
}

// GetGroupedListensFromTrack executes getGroupedListensFromTrack and
// collects every row; rows arrive ordered by bucket start.
func (q *Queries) GetGroupedListensFromTrack(ctx context.Context, arg GetGroupedListensFromTrackParams) ([]GetGroupedListensFromTrackRow, error) {
	rows, err := q.db.Query(ctx, getGroupedListensFromTrack, arg.ID, arg.BucketCount)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupedListensFromTrackRow
	for rows.Next() {
		var i GetGroupedListensFromTrackRow
		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Surface any error the iteration itself hit (distinct from Scan errors).
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue