From c8b1bd357668fb71683988b0208684fc07b6c75e Mon Sep 17 00:00:00 2001
From: Gabe Farrell
Date: Mon, 12 Jan 2026 13:51:12 -0500
Subject: [PATCH] api

---
 db/queries/interest.sql             | 162 +++++++++++++++++
 engine/handlers/interest.go         |  47 +++++
 engine/routes.go                    |   1 +
 internal/db/db.go                   |   1 +
 internal/db/opts.go                 |   7 +
 internal/db/psql/interest.go        |  70 ++++++++
 internal/db/types.go                |   6 +
 internal/repository/interest.sql.go | 270 ++++++++++++++++++++++++++++
 8 files changed, 564 insertions(+)
 create mode 100644 db/queries/interest.sql
 create mode 100644 engine/handlers/interest.go
 create mode 100644 internal/db/psql/interest.go
 create mode 100644 internal/repository/interest.sql.go

diff --git a/db/queries/interest.sql b/db/queries/interest.sql
new file mode 100644
index 0000000..389c75b
--- /dev/null
+++ b/db/queries/interest.sql
@@ -0,0 +1,162 @@
+-- name: GetGroupedListensFromArtist :many
+WITH artist_listens AS (
+    SELECT
+        l.listened_at
+    FROM listens l
+    JOIN tracks t ON t.id = l.track_id
+    JOIN artist_tracks at ON at.track_id = t.id
+    WHERE at.artist_id = $1
+),
+bounds AS (
+    SELECT
+        MIN(listened_at) AS start_time,
+        MAX(listened_at) AS end_time
+    FROM artist_listens
+),
+bucketed AS (
+    SELECT
+        LEAST(
+            sqlc.arg(bucket_count) - 1,
+            FLOOR(
+                (
+                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
+                    /
+                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
+                ) * sqlc.arg(bucket_count)
+            )::int
+        ) AS bucket_idx,
+        b.start_time,
+        b.end_time
+    FROM artist_listens al
+    CROSS JOIN bounds b
+),
+aggregated AS (
+    SELECT
+        start_time +
+        (
+            bucket_idx * (end_time - start_time)
+            / sqlc.arg(bucket_count)
+        ) AS bucket_start,
+        start_time +
+        (
+            (bucket_idx + 1) * (end_time - start_time)
+            / sqlc.arg(bucket_count)
+        ) AS bucket_end,
+        COUNT(*) AS listen_count
+    FROM bucketed
+    GROUP BY bucket_idx, start_time, end_time
+)
+SELECT
+    bucket_start::timestamptz,
+    bucket_end::timestamptz,
+    listen_count
+FROM aggregated
+ORDER BY bucket_start;
+
+-- name: GetGroupedListensFromRelease :many
+WITH artist_listens AS (
+    SELECT
+        l.listened_at
+    FROM listens l
+    JOIN tracks t ON t.id = l.track_id
+    WHERE t.release_id = $1
+),
+bounds AS (
+    SELECT
+        MIN(listened_at) AS start_time,
+        MAX(listened_at) AS end_time
+    FROM artist_listens
+),
+bucketed AS (
+    SELECT
+        LEAST(
+            sqlc.arg(bucket_count) - 1,
+            FLOOR(
+                (
+                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
+                    /
+                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
+                ) * sqlc.arg(bucket_count)
+            )::int
+        ) AS bucket_idx,
+        b.start_time,
+        b.end_time
+    FROM artist_listens al
+    CROSS JOIN bounds b
+),
+aggregated AS (
+    SELECT
+        start_time +
+        (
+            bucket_idx * (end_time - start_time)
+            / sqlc.arg(bucket_count)
+        ) AS bucket_start,
+        start_time +
+        (
+            (bucket_idx + 1) * (end_time - start_time)
+            / sqlc.arg(bucket_count)
+        ) AS bucket_end,
+        COUNT(*) AS listen_count
+    FROM bucketed
+    GROUP BY bucket_idx, start_time, end_time
+)
+SELECT
+    bucket_start::timestamptz,
+    bucket_end::timestamptz,
+    listen_count
+FROM aggregated
+ORDER BY bucket_start;
+
+-- name: GetGroupedListensFromTrack :many
+WITH artist_listens AS (
+    SELECT
+        l.listened_at
+    FROM listens l
+    JOIN tracks t ON t.id = l.track_id
+    WHERE t.id = $1
+),
+bounds AS (
+    SELECT
+        MIN(listened_at) AS start_time,
+        MAX(listened_at) AS end_time
+    FROM artist_listens
+),
+bucketed AS (
+    SELECT
+        LEAST(
+            sqlc.arg(bucket_count) - 1,
+            FLOOR(
+                (
+                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
+                    /
+                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
+                ) * sqlc.arg(bucket_count)
+            )::int
+        ) AS bucket_idx,
+        b.start_time,
+        b.end_time
+    FROM artist_listens al
+    CROSS JOIN bounds b
+),
+aggregated AS (
+    SELECT
+        start_time +
+        (
+            bucket_idx * (end_time - start_time)
+            / sqlc.arg(bucket_count)
+        ) AS bucket_start,
+        start_time +
+        (
+            (bucket_idx + 1) * (end_time - start_time)
+            / sqlc.arg(bucket_count)
+        ) AS bucket_end,
+        COUNT(*) AS listen_count
+    FROM bucketed
+    GROUP BY bucket_idx, start_time, end_time
+)
+SELECT
+    bucket_start::timestamptz,
+    bucket_end::timestamptz,
+    listen_count
+FROM aggregated
+ORDER BY bucket_start;
diff --git a/engine/handlers/interest.go b/engine/handlers/interest.go
new file mode 100644
index 0000000..9787c45
--- /dev/null
+++ b/engine/handlers/interest.go
@@ -0,0 +1,47 @@
+package handlers
+
+import (
+	"net/http"
+	"strconv"
+
+	"github.com/gabehf/koito/internal/db"
+	"github.com/gabehf/koito/internal/logger"
+	"github.com/gabehf/koito/internal/utils"
+)
+
+func GetInterestHandler(store db.DB) func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
+		l := logger.FromContext(ctx)
+
+		l.Debug().Msg("GetInterestHandler: Received request to retrieve interest")
+
+		// im just using this to parse the artist/album/track id, which is bad
+		parsed := OptsFromRequest(r)
+
+		bucketCountStr := r.URL.Query().Get("buckets")
+		var buckets = 0
+		var err error
+		if buckets, err = strconv.Atoi(bucketCountStr); err != nil || buckets < 1 {
+			l.Debug().Msg("GetInterestHandler: Buckets is not a positive integer")
+			utils.WriteError(w, "parameter 'buckets' must be a positive integer", http.StatusBadRequest)
+			return
+		}
+
+		opts := db.GetInterestOpts{
+			Buckets:  buckets,
+			AlbumID:  int32(parsed.AlbumID),
+			ArtistID: int32(parsed.ArtistID),
+			TrackID:  int32(parsed.TrackID),
+		}
+
+		interest, err := store.GetInterest(ctx, opts)
+		if err != nil {
+			l.Err(err).Msg("GetInterestHandler: Failed to query interest")
+			utils.WriteError(w, "Failed to retrieve interest: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		utils.WriteJSON(w, http.StatusOK, interest)
+	}
+}
diff --git a/engine/routes.go b/engine/routes.go
index 54100ed..e1c5fda 100644
--- a/engine/routes.go
+++ b/engine/routes.go
@@ -55,6 +55,7 @@ func bindRoutes(
 		r.Get("/search", handlers.SearchHandler(db))
 		r.Get("/aliases", handlers.GetAliasesHandler(db))
 		r.Get("/summary", handlers.SummaryHandler(db))
+		r.Get("/interest", handlers.GetInterestHandler(db))
 	})
 	r.Post("/logout", handlers.LogoutHandler(db))
 	if !cfg.RateLimitDisabled() {
diff --git a/internal/db/db.go b/internal/db/db.go
index f2364be..e725bc8 100644
--- a/internal/db/db.go
+++ b/internal/db/db.go
@@ -30,6 +30,7 @@ type DB interface {
 	GetUserBySession(ctx context.Context, sessionId uuid.UUID) (*models.User, error)
 	GetUserByUsername(ctx context.Context, username string) (*models.User, error)
 	GetUserByApiKey(ctx context.Context, key string) (*models.User, error)
+	GetInterest(ctx context.Context, opts GetInterestOpts) ([]InterestBucket, error)
 
 	// Save
 
diff --git a/internal/db/opts.go b/internal/db/opts.go
index 65834f2..cb23bd3 100644
--- a/internal/db/opts.go
+++ b/internal/db/opts.go
@@ -153,3 +153,10 @@ type GetExportPageOpts struct {
 	TrackID int32
 	Limit   int32
 }
+
+type GetInterestOpts struct {
+	Buckets  int
+	AlbumID  int32
+	ArtistID int32
+	TrackID  int32
+}
diff --git a/internal/db/psql/interest.go b/internal/db/psql/interest.go
new file mode 100644
index 0000000..9e8a623
--- /dev/null
+++ b/internal/db/psql/interest.go
@@ -0,0 +1,70 @@
+package psql
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/gabehf/koito/internal/db"
+	"github.com/gabehf/koito/internal/repository"
+)
+
+func (d *Psql) GetInterest(ctx context.Context, opts db.GetInterestOpts) ([]db.InterestBucket, error) {
+	if opts.Buckets <= 0 {
+		return nil, errors.New("GetInterest: bucket count must be a positive integer")
+	}
+
+	ret := make([]db.InterestBucket, 0, opts.Buckets)
+
+	if opts.ArtistID != 0 {
+		resp, err := d.q.GetGroupedListensFromArtist(ctx, repository.GetGroupedListensFromArtistParams{
+			ArtistID:    opts.ArtistID,
+			BucketCount: opts.Buckets,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("GetInterest: GetGroupedListensFromArtist: %w", err)
+		}
+		for _, v := range resp {
+			ret = append(ret, db.InterestBucket{
+				BucketStart: v.BucketStart,
+				BucketEnd:   v.BucketEnd,
+				ListenCount: v.ListenCount,
+			})
+		}
+		return ret, nil
+	} else if opts.AlbumID != 0 {
+		resp, err := d.q.GetGroupedListensFromRelease(ctx, repository.GetGroupedListensFromReleaseParams{
+			ReleaseID:   opts.AlbumID,
+			BucketCount: opts.Buckets,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("GetInterest: GetGroupedListensFromRelease: %w", err)
+		}
+		for _, v := range resp {
+			ret = append(ret, db.InterestBucket{
+				BucketStart: v.BucketStart,
+				BucketEnd:   v.BucketEnd,
+				ListenCount: v.ListenCount,
+			})
+		}
+		return ret, nil
+	} else if opts.TrackID != 0 {
+		resp, err := d.q.GetGroupedListensFromTrack(ctx, repository.GetGroupedListensFromTrackParams{
+			ID:          opts.TrackID,
+			BucketCount: opts.Buckets,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("GetInterest: GetGroupedListensFromTrack: %w", err)
+		}
+		for _, v := range resp {
+			ret = append(ret, db.InterestBucket{
+				BucketStart: v.BucketStart,
+				BucketEnd:   v.BucketEnd,
+				ListenCount: v.ListenCount,
+			})
+		}
+		return ret, nil
+	} else {
+		return nil, errors.New("GetInterest: artist id, album id, or track id must be provided")
+	}
+}
diff --git a/internal/db/types.go b/internal/db/types.go
index 421832f..93ff031 100644
--- a/internal/db/types.go
+++ b/internal/db/types.go
@@ -44,3 +44,9 @@ type ExportItem struct {
 	ReleaseAliases []models.Alias
 	Artists        []models.ArtistWithFullAliases
 }
+
+type InterestBucket struct {
+	BucketStart time.Time `json:"bucket_start"`
+	BucketEnd   time.Time `json:"bucket_end"`
+	ListenCount int64     `json:"listen_count"`
+}
diff --git a/internal/repository/interest.sql.go b/internal/repository/interest.sql.go
new file mode 100644
index 0000000..27c1920
--- /dev/null
+++ b/internal/repository/interest.sql.go
@@ -0,0 +1,270 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+//   sqlc v1.30.0
+// source: interest.sql
+
+package repository
+
+import (
+	"context"
+	"time"
+)
+
+const getGroupedListensFromArtist = `-- name: GetGroupedListensFromArtist :many
+WITH artist_listens AS (
+    SELECT
+        l.listened_at
+    FROM listens l
+    JOIN tracks t ON t.id = l.track_id
+    JOIN artist_tracks at ON at.track_id = t.id
+    WHERE at.artist_id = $1
+),
+bounds AS (
+    SELECT
+        MIN(listened_at) AS start_time,
+        MAX(listened_at) AS end_time
+    FROM artist_listens
+),
+bucketed AS (
+    SELECT
+        LEAST(
+            $2 - 1,
+            FLOOR(
+                (
+                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
+                    /
+                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
+                ) * $2
+            )::int
+        ) AS bucket_idx,
+        b.start_time,
+        b.end_time
+    FROM artist_listens al
+    CROSS JOIN bounds b
+),
+aggregated AS (
+    SELECT
+        start_time +
+        (
+            bucket_idx * (end_time - start_time)
+            / $2
+        ) AS bucket_start,
+        start_time +
+        (
+            (bucket_idx + 1) * (end_time - start_time)
+            / $2
+        ) AS bucket_end,
+        COUNT(*) AS listen_count
+    FROM bucketed
+    GROUP BY bucket_idx, start_time, end_time
+)
+SELECT
+    bucket_start::timestamptz,
+    bucket_end::timestamptz,
+    listen_count
+FROM aggregated
+ORDER BY bucket_start
+`
+
+type GetGroupedListensFromArtistParams struct {
+	ArtistID    int32
+	BucketCount interface{}
+}
+
+type GetGroupedListensFromArtistRow struct {
+	BucketStart time.Time
+	BucketEnd   time.Time
+	ListenCount int64
+}
+
+func (q *Queries) GetGroupedListensFromArtist(ctx context.Context, arg GetGroupedListensFromArtistParams) ([]GetGroupedListensFromArtistRow, error) {
+	rows, err := q.db.Query(ctx, getGroupedListensFromArtist, arg.ArtistID, arg.BucketCount)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var items []GetGroupedListensFromArtistRow
+	for rows.Next() {
+		var i GetGroupedListensFromArtistRow
+		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
+			return nil, err
+		}
+		items = append(items, i)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return items, nil
+}
+
+const getGroupedListensFromRelease = `-- name: GetGroupedListensFromRelease :many
+WITH artist_listens AS (
+    SELECT
+        l.listened_at
+    FROM listens l
+    JOIN tracks t ON t.id = l.track_id
+    WHERE t.release_id = $1
+),
+bounds AS (
+    SELECT
+        MIN(listened_at) AS start_time,
+        MAX(listened_at) AS end_time
+    FROM artist_listens
+),
+bucketed AS (
+    SELECT
+        LEAST(
+            $2 - 1,
+            FLOOR(
+                (
+                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
+                    /
+                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
+                ) * $2
+            )::int
+        ) AS bucket_idx,
+        b.start_time,
+        b.end_time
+    FROM artist_listens al
+    CROSS JOIN bounds b
+),
+aggregated AS (
+    SELECT
+        start_time +
+        (
+            bucket_idx * (end_time - start_time)
+            / $2
+        ) AS bucket_start,
+        start_time +
+        (
+            (bucket_idx + 1) * (end_time - start_time)
+            / $2
+        ) AS bucket_end,
+        COUNT(*) AS listen_count
+    FROM bucketed
+    GROUP BY bucket_idx, start_time, end_time
+)
+SELECT
+    bucket_start::timestamptz,
+    bucket_end::timestamptz,
+    listen_count
+FROM aggregated
+ORDER BY bucket_start
+`
+
+type GetGroupedListensFromReleaseParams struct {
+	ReleaseID   int32
+	BucketCount interface{}
+}
+
+type GetGroupedListensFromReleaseRow struct {
+	BucketStart time.Time
+	BucketEnd   time.Time
+	ListenCount int64
+}
+
+func (q *Queries) GetGroupedListensFromRelease(ctx context.Context, arg GetGroupedListensFromReleaseParams) ([]GetGroupedListensFromReleaseRow, error) {
+	rows, err := q.db.Query(ctx, getGroupedListensFromRelease, arg.ReleaseID, arg.BucketCount)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var items []GetGroupedListensFromReleaseRow
+	for rows.Next() {
+		var i GetGroupedListensFromReleaseRow
+		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
+			return nil, err
+		}
+		items = append(items, i)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return items, nil
+}
+
+const getGroupedListensFromTrack = `-- name: GetGroupedListensFromTrack :many
+WITH artist_listens AS (
+    SELECT
+        l.listened_at
+    FROM listens l
+    JOIN tracks t ON t.id = l.track_id
+    WHERE t.id = $1
+),
+bounds AS (
+    SELECT
+        MIN(listened_at) AS start_time,
+        MAX(listened_at) AS end_time
+    FROM artist_listens
+),
+bucketed AS (
+    SELECT
+        LEAST(
+            $2 - 1,
+            FLOOR(
+                (
+                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
+                    /
+                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
+                ) * $2
+            )::int
+        ) AS bucket_idx,
+        b.start_time,
+        b.end_time
+    FROM artist_listens al
+    CROSS JOIN bounds b
+),
+aggregated AS (
+    SELECT
+        start_time +
+        (
+            bucket_idx * (end_time - start_time)
+            / $2
+        ) AS bucket_start,
+        start_time +
+        (
+            (bucket_idx + 1) * (end_time - start_time)
+            / $2
+        ) AS bucket_end,
+        COUNT(*) AS listen_count
+    FROM bucketed
+    GROUP BY bucket_idx, start_time, end_time
+)
+SELECT
+    bucket_start::timestamptz,
+    bucket_end::timestamptz,
+    listen_count
+FROM aggregated
+ORDER BY bucket_start
+`
+
+type GetGroupedListensFromTrackParams struct {
+	ID          int32
+	BucketCount interface{}
+}
+
+type GetGroupedListensFromTrackRow struct {
+	BucketStart time.Time
+	BucketEnd   time.Time
+	ListenCount int64
+}
+
+func (q *Queries) GetGroupedListensFromTrack(ctx context.Context, arg GetGroupedListensFromTrackParams) ([]GetGroupedListensFromTrackRow, error) {
+	rows, err := q.db.Query(ctx, getGroupedListensFromTrack, arg.ID, arg.BucketCount)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var items []GetGroupedListensFromTrackRow
+	for rows.Next() {
+		var i GetGroupedListensFromTrackRow
+		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
+			return nil, err
+		}
+		items = append(items, i)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return items, nil
+}