mirror of
https://github.com/gabehf/Koito.git
synced 2026-04-22 12:01:52 -07:00
api
This commit is contained in:
parent
e45099c71a
commit
c8b1bd3576
8 changed files with 564 additions and 0 deletions
162
db/queries/interest.sql
Normal file
162
db/queries/interest.sql
Normal file
|
|
@ -0,0 +1,162 @@
|
|||
-- name: GetGroupedListensFromArtist :many
-- Splits an artist's full listen history (first listen to last listen) into
-- bucket_count equal-width time buckets and returns one row per non-empty
-- bucket: its start, end, and listen count. Buckets with zero listens
-- produce no row, so fewer than bucket_count rows may be returned.
WITH artist_listens AS (
    -- Every listen timestamp attributed to this artist (via artist_tracks).
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    JOIN artist_tracks at ON at.track_id = t.id
    WHERE at.artist_id = $1
),
bounds AS (
    -- Overall time span of the artist's listens (NULL if there are none).
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    -- Map each listen to a bucket index in [0, bucket_count).
    -- LEAST clamps the very last listen (which would otherwise compute
    -- index bucket_count) into the final bucket. NULLIF avoids division by
    -- zero when all listens share one timestamp; in that case the index
    -- expression is NULL and LEAST (which skips NULLs in PostgreSQL)
    -- yields bucket_count - 1, putting everything in the last bucket.
    SELECT
        LEAST(
            sqlc.arg(bucket_count) - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * sqlc.arg(bucket_count)
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    -- Count listens per bucket and reconstruct each bucket's time range
    -- from its index and the overall bounds.
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / sqlc.arg(bucket_count)
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / sqlc.arg(bucket_count)
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start;
|
||||
|
||||
-- name: GetGroupedListensFromRelease :many
-- Same bucketing scheme as GetGroupedListensFromArtist, scoped to all
-- tracks on one release: splits the release's listen history into
-- bucket_count equal-width time buckets and returns one row per non-empty
-- bucket. NOTE(review): the CTE is still named artist_listens from a
-- copy-paste; renaming it (and regenerating the sqlc output) would help
-- readability.
WITH artist_listens AS (
    -- Every listen timestamp for any track on this release.
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.release_id = $1
),
bounds AS (
    -- Overall time span of the release's listens (NULL if there are none).
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    -- Map each listen to a bucket index in [0, bucket_count); LEAST clamps
    -- the final listen into the last bucket, and NULLIF guards the
    -- zero-width-span case (all listens at one timestamp).
    SELECT
        LEAST(
            sqlc.arg(bucket_count) - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * sqlc.arg(bucket_count)
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    -- Count listens per bucket and reconstruct each bucket's time range.
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / sqlc.arg(bucket_count)
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / sqlc.arg(bucket_count)
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start;
|
||||
|
||||
-- name: GetGroupedListensFromTrack :many
-- Same bucketing scheme as GetGroupedListensFromArtist, scoped to a single
-- track: splits the track's listen history into bucket_count equal-width
-- time buckets and returns one row per non-empty bucket.
-- NOTE(review): the CTE is still named artist_listens from a copy-paste;
-- renaming it (and regenerating the sqlc output) would help readability.
WITH artist_listens AS (
    -- Every listen timestamp for this track.
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.id = $1
),
bounds AS (
    -- Overall time span of the track's listens (NULL if there are none).
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    -- Map each listen to a bucket index in [0, bucket_count); LEAST clamps
    -- the final listen into the last bucket, and NULLIF guards the
    -- zero-width-span case (all listens at one timestamp).
    SELECT
        LEAST(
            sqlc.arg(bucket_count) - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * sqlc.arg(bucket_count)
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    -- Count listens per bucket and reconstruct each bucket's time range.
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / sqlc.arg(bucket_count)
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / sqlc.arg(bucket_count)
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start;
|
||||
47
engine/handlers/interest.go
Normal file
47
engine/handlers/interest.go
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/gabehf/koito/internal/db"
|
||||
"github.com/gabehf/koito/internal/logger"
|
||||
"github.com/gabehf/koito/internal/utils"
|
||||
)
|
||||
|
||||
func GetInterestHandler(store db.DB) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
l := logger.FromContext(ctx)
|
||||
|
||||
l.Debug().Msg("GetInterestHandler: Received request to retrieve interest")
|
||||
|
||||
// im just using this to parse the artist/album/track id, which is bad
|
||||
parsed := OptsFromRequest(r)
|
||||
|
||||
bucketCountStr := r.URL.Query().Get("buckets")
|
||||
var buckets = 0
|
||||
var err error
|
||||
if buckets, err = strconv.Atoi(bucketCountStr); err != nil {
|
||||
l.Debug().Msg("GetInterestHandler: Buckets is not an integer")
|
||||
utils.WriteError(w, "parameter 'buckets' must be an integer", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
opts := db.GetInterestOpts{
|
||||
Buckets: buckets,
|
||||
AlbumID: int32(parsed.AlbumID),
|
||||
ArtistID: int32(parsed.ArtistID),
|
||||
TrackID: int32(parsed.TrackID),
|
||||
}
|
||||
|
||||
interest, err := store.GetInterest(ctx, opts)
|
||||
if err != nil {
|
||||
l.Err(err).Msg("GetInterestHandler: Failed to query interest")
|
||||
utils.WriteError(w, "Failed to retrieve interest: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
utils.WriteJSON(w, http.StatusOK, interest)
|
||||
}
|
||||
}
|
||||
|
|
@ -55,6 +55,7 @@ func bindRoutes(
|
|||
r.Get("/search", handlers.SearchHandler(db))
|
||||
r.Get("/aliases", handlers.GetAliasesHandler(db))
|
||||
r.Get("/summary", handlers.SummaryHandler(db))
|
||||
r.Get("/interest", handlers.GetInterestHandler(db))
|
||||
})
|
||||
r.Post("/logout", handlers.LogoutHandler(db))
|
||||
if !cfg.RateLimitDisabled() {
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ type DB interface {
|
|||
GetUserBySession(ctx context.Context, sessionId uuid.UUID) (*models.User, error)
|
||||
GetUserByUsername(ctx context.Context, username string) (*models.User, error)
|
||||
GetUserByApiKey(ctx context.Context, key string) (*models.User, error)
|
||||
GetInterest(ctx context.Context, opts GetInterestOpts) ([]InterestBucket, error)
|
||||
|
||||
// Save
|
||||
|
||||
|
|
|
|||
|
|
@ -153,3 +153,10 @@ type GetExportPageOpts struct {
|
|||
TrackID int32
|
||||
Limit int32
|
||||
}
|
||||
|
||||
// GetInterestOpts configures DB.GetInterest. Buckets is the number of time
// buckets to split the listen history into. Exactly one of AlbumID,
// ArtistID, or TrackID should be non-zero to select the subject; the store
// checks them in its own precedence order.
type GetInterestOpts struct {
	Buckets  int
	AlbumID  int32
	ArtistID int32
	TrackID  int32
}
|
||||
|
|
|
|||
70
internal/db/psql/interest.go
Normal file
70
internal/db/psql/interest.go
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
package psql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/gabehf/koito/internal/db"
|
||||
"github.com/gabehf/koito/internal/repository"
|
||||
)
|
||||
|
||||
func (d *Psql) GetInterest(ctx context.Context, opts db.GetInterestOpts) ([]db.InterestBucket, error) {
|
||||
if opts.Buckets == 0 {
|
||||
return nil, errors.New("GetInterest: bucket count must be provided")
|
||||
}
|
||||
|
||||
ret := make([]db.InterestBucket, opts.Buckets)
|
||||
|
||||
if opts.ArtistID != 0 {
|
||||
resp, err := d.q.GetGroupedListensFromArtist(ctx, repository.GetGroupedListensFromArtistParams{
|
||||
ArtistID: opts.ArtistID,
|
||||
BucketCount: opts.Buckets,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetInterest: GetGroupedListensFromArtist: %w", err)
|
||||
}
|
||||
for i, v := range resp {
|
||||
ret[i] = db.InterestBucket{
|
||||
BucketStart: v.BucketStart,
|
||||
BucketEnd: v.BucketEnd,
|
||||
ListenCount: v.ListenCount,
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
} else if opts.AlbumID != 0 {
|
||||
resp, err := d.q.GetGroupedListensFromRelease(ctx, repository.GetGroupedListensFromReleaseParams{
|
||||
ReleaseID: opts.AlbumID,
|
||||
BucketCount: opts.Buckets,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetInterest: GetGroupedListensFromRelease: %w", err)
|
||||
}
|
||||
for i, v := range resp {
|
||||
ret[i] = db.InterestBucket{
|
||||
BucketStart: v.BucketStart,
|
||||
BucketEnd: v.BucketEnd,
|
||||
ListenCount: v.ListenCount,
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
} else if opts.TrackID != 0 {
|
||||
resp, err := d.q.GetGroupedListensFromTrack(ctx, repository.GetGroupedListensFromTrackParams{
|
||||
ID: opts.TrackID,
|
||||
BucketCount: opts.Buckets,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetInterest: GetGroupedListensFromTrack: %w", err)
|
||||
}
|
||||
for i, v := range resp {
|
||||
ret[i] = db.InterestBucket{
|
||||
BucketStart: v.BucketStart,
|
||||
BucketEnd: v.BucketEnd,
|
||||
ListenCount: v.ListenCount,
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
} else {
|
||||
return nil, errors.New("GetInterest: artist id, album id, or track id must be provided")
|
||||
}
|
||||
}
|
||||
|
|
@ -44,3 +44,9 @@ type ExportItem struct {
|
|||
ReleaseAliases []models.Alias
|
||||
Artists []models.ArtistWithFullAliases
|
||||
}
|
||||
|
||||
// InterestBucket is one slice of a listen-history histogram: the time range
// the bucket covers and how many listens fell inside it.
type InterestBucket struct {
	BucketStart time.Time `json:"bucket_start"`
	BucketEnd   time.Time `json:"bucket_end"`
	ListenCount int64     `json:"listen_count"`
}
|
||||
|
|
|
|||
270
internal/repository/interest.sql.go
Normal file
270
internal/repository/interest.sql.go
Normal file
|
|
@ -0,0 +1,270 @@
|
|||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.30.0
|
||||
// source: interest.sql
|
||||
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NOTE(review): this file is generated by sqlc; any edits here (including
// these comments) are lost on regeneration. BucketCount is interface{}
// because the SQL uses sqlc.arg(bucket_count) with no cast — adding ::int
// in interest.sql would let sqlc infer a concrete integer type. Callers
// must pass an integer.
const getGroupedListensFromArtist = `-- name: GetGroupedListensFromArtist :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    JOIN artist_tracks at ON at.track_id = t.id
    WHERE at.artist_id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`

type GetGroupedListensFromArtistParams struct {
	ArtistID    int32
	BucketCount interface{}
}

type GetGroupedListensFromArtistRow struct {
	BucketStart time.Time
	BucketEnd   time.Time
	ListenCount int64
}

// GetGroupedListensFromArtist buckets an artist's listen history into
// BucketCount equal time slices; empty buckets yield no row.
func (q *Queries) GetGroupedListensFromArtist(ctx context.Context, arg GetGroupedListensFromArtistParams) ([]GetGroupedListensFromArtistRow, error) {
	rows, err := q.db.Query(ctx, getGroupedListensFromArtist, arg.ArtistID, arg.BucketCount)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupedListensFromArtistRow
	for rows.Next() {
		var i GetGroupedListensFromArtistRow
		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
// NOTE(review): generated by sqlc; edits here (including these comments)
// are lost on regeneration. BucketCount is interface{} for the same reason
// as in GetGroupedListensFromArtistParams; callers must pass an integer.
const getGroupedListensFromRelease = `-- name: GetGroupedListensFromRelease :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.release_id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`

type GetGroupedListensFromReleaseParams struct {
	ReleaseID   int32
	BucketCount interface{}
}

type GetGroupedListensFromReleaseRow struct {
	BucketStart time.Time
	BucketEnd   time.Time
	ListenCount int64
}

// GetGroupedListensFromRelease buckets a release's listen history into
// BucketCount equal time slices; empty buckets yield no row.
func (q *Queries) GetGroupedListensFromRelease(ctx context.Context, arg GetGroupedListensFromReleaseParams) ([]GetGroupedListensFromReleaseRow, error) {
	rows, err := q.db.Query(ctx, getGroupedListensFromRelease, arg.ReleaseID, arg.BucketCount)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupedListensFromReleaseRow
	for rows.Next() {
		var i GetGroupedListensFromReleaseRow
		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
// NOTE(review): generated by sqlc; edits here (including these comments)
// are lost on regeneration. BucketCount is interface{} for the same reason
// as in GetGroupedListensFromArtistParams; callers must pass an integer.
const getGroupedListensFromTrack = `-- name: GetGroupedListensFromTrack :many
WITH artist_listens AS (
    SELECT
        l.listened_at
    FROM listens l
    JOIN tracks t ON t.id = l.track_id
    WHERE t.id = $1
),
bounds AS (
    SELECT
        MIN(listened_at) AS start_time,
        MAX(listened_at) AS end_time
    FROM artist_listens
),
bucketed AS (
    SELECT
        LEAST(
            $2 - 1,
            FLOOR(
                (
                    EXTRACT(EPOCH FROM (al.listened_at - b.start_time))
                    /
                    NULLIF(EXTRACT(EPOCH FROM (b.end_time - b.start_time)), 0)
                ) * $2
            )::int
        ) AS bucket_idx,
        b.start_time,
        b.end_time
    FROM artist_listens al
    CROSS JOIN bounds b
),
aggregated AS (
    SELECT
        start_time
        + (
            bucket_idx * (end_time - start_time)
            / $2
        ) AS bucket_start,
        start_time
        + (
            (bucket_idx + 1) * (end_time - start_time)
            / $2
        ) AS bucket_end,
        COUNT(*) AS listen_count
    FROM bucketed
    GROUP BY bucket_idx, start_time, end_time
)
SELECT
    bucket_start::timestamptz,
    bucket_end::timestamptz,
    listen_count
FROM aggregated
ORDER BY bucket_start
`

type GetGroupedListensFromTrackParams struct {
	ID          int32
	BucketCount interface{}
}

type GetGroupedListensFromTrackRow struct {
	BucketStart time.Time
	BucketEnd   time.Time
	ListenCount int64
}

// GetGroupedListensFromTrack buckets a track's listen history into
// BucketCount equal time slices; empty buckets yield no row.
func (q *Queries) GetGroupedListensFromTrack(ctx context.Context, arg GetGroupedListensFromTrackParams) ([]GetGroupedListensFromTrackRow, error) {
	rows, err := q.db.Query(ctx, getGroupedListensFromTrack, arg.ID, arg.BucketCount)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupedListensFromTrackRow
	for rows.Next() {
		var i GetGroupedListensFromTrackRow
		if err := rows.Scan(&i.BucketStart, &i.BucketEnd, &i.ListenCount); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue