diff --git a/api/app-routes.go b/api/app-routes.go index 6a77cd9..568a46f 100644 --- a/api/app-routes.go +++ b/api/app-routes.go @@ -146,7 +146,7 @@ func (api *API) createAppResourcesRoute(routeName string, args ...map[string]any } templateVars["Data"] = document - templateVars["TotalTimeLeftSeconds"] = (document.Pages - document.Page) * document.SecondsPerPage + templateVars["TotalTimeLeftSeconds"] = int64((100.0 - document.Percentage) * float64(document.SecondsPerPercent)) } else if routeName == "activity" { activityFilter := database.GetActivityParams{ UserID: userID, @@ -177,13 +177,13 @@ func (api *API) createAppResourcesRoute(routeName string, args ...map[string]any log.Info("GetDatabaseInfo Performance: ", time.Since(start)) streaks, _ := api.DB.Queries.GetUserStreaks(api.DB.Ctx, userID) - wpn_leaderboard, _ := api.DB.Queries.GetWPMLeaderboard(api.DB.Ctx) + wpm_leaderboard, _ := api.DB.Queries.GetWPMLeaderboard(api.DB.Ctx) templateVars["Data"] = gin.H{ "Streaks": streaks, "GraphData": read_graph_data, "DatabaseInfo": database_info, - "WPMLeaderboard": wpn_leaderboard, + "WPMLeaderboard": wpm_leaderboard, } } else if routeName == "settings" { user, err := api.DB.Queries.GetUser(api.DB.Ctx, userID) @@ -456,6 +456,14 @@ func (api *API) uploadNewDocument(c *gin.Context) { return } + // Get Word Count + wordCount, err := metadata.GetWordCount(tempFile.Name()) + if err != nil { + log.Error("[uploadNewDocument] Word Count Failure:", err) + errorPage(c, http.StatusInternalServerError, "Unable to calculate word count.") + return + } + // Derive Filename var fileName string if *metadataInfo.Author != "" { @@ -499,6 +507,7 @@ func (api *API) uploadNewDocument(c *gin.Context) { Title: metadataInfo.Title, Author: metadataInfo.Author, Description: metadataInfo.Description, + Words: &wordCount, Md5: fileHash, Filepath: &fileName, }); err != nil { @@ -711,7 +720,7 @@ func (api *API) identifyDocument(c *gin.Context) { } templateVars["Data"] = document - 
templateVars["TotalTimeLeftSeconds"] = (document.Pages - document.Page) * document.SecondsPerPage + templateVars["TotalTimeLeftSeconds"] = int64((100.0 - document.Percentage) * float64(document.SecondsPerPercent)) c.HTML(http.StatusOK, "document", templateVars) } @@ -814,6 +823,14 @@ func (api *API) saveNewDocument(c *gin.Context) { return } + // Get Word Count + wordCount, err := metadata.GetWordCount(safePath) + if err != nil { + log.Error("[saveNewDocument] Word Count Failure:", err) + errorPage(c, http.StatusInternalServerError, "Unable to calculate word count.") + return + } + // Upsert Document if _, err = api.DB.Queries.UpsertDocument(api.DB.Ctx, database.UpsertDocumentParams{ ID: partialMD5, @@ -821,6 +838,7 @@ func (api *API) saveNewDocument(c *gin.Context) { Author: rDocAdd.Author, Md5: fileHash, Filepath: &fileName, + Words: &wordCount, }); err != nil { log.Error("[saveNewDocument] UpsertDocument DB Error:", err) errorPage(c, http.StatusInternalServerError, fmt.Sprintf("UpsertDocument DB Error: %v", err)) diff --git a/api/ko-routes.go b/api/ko-routes.go index 8e56815..806456e 100644 --- a/api/ko-routes.go +++ b/api/ko-routes.go @@ -19,6 +19,7 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/exp/slices" "reichard.io/bbank/database" + "reichard.io/bbank/metadata" ) type activityItem struct { @@ -263,13 +264,13 @@ func (api *API) addActivities(c *gin.Context) { // Add All Activity for _, item := range rActivity.Activity { if _, err := qtx.AddActivity(api.DB.Ctx, database.AddActivityParams{ - UserID: rUser.(string), - DocumentID: item.DocumentID, - DeviceID: rActivity.DeviceID, - StartTime: time.Unix(int64(item.StartTime), 0).UTC().Format(time.RFC3339), - Duration: int64(item.Duration), - Page: int64(item.Page), - Pages: int64(item.Pages), + UserID: rUser.(string), + DocumentID: item.DocumentID, + DeviceID: rActivity.DeviceID, + StartTime: time.Unix(int64(item.StartTime), 0).UTC().Format(time.RFC3339), + Duration: int64(item.Duration), + 
StartPercentage: float64(item.Page) / float64(item.Pages), + EndPercentage: float64(item.Page+1) / float64(item.Pages), }); err != nil { log.Error("[addActivities] AddActivity DB Error:", err) c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Activity"}) @@ -284,14 +285,6 @@ func (api *API) addActivities(c *gin.Context) { return } - // Update Temp Tables - go func() { - log.Info("[addActivities] Caching Temp Tables") - if err := api.DB.CacheTempTables(); err != nil { - log.Warn("[addActivities] CacheTempTables Failure: ", err) - } - }() - c.JSON(http.StatusOK, gin.H{ "added": len(rActivity.Activity), }) @@ -367,7 +360,7 @@ func (api *API) addDocuments(c *gin.Context) { // Upsert Documents for _, doc := range rNewDocs.Documents { - doc, err := qtx.UpsertDocument(api.DB.Ctx, database.UpsertDocumentParams{ + _, err := qtx.UpsertDocument(api.DB.Ctx, database.UpsertDocumentParams{ ID: doc.ID, Title: api.sanitizeInput(doc.Title), Author: api.sanitizeInput(doc.Author), @@ -381,16 +374,6 @@ func (api *API) addDocuments(c *gin.Context) { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Document"}) return } - - if _, err = qtx.UpdateDocumentSync(api.DB.Ctx, database.UpdateDocumentSyncParams{ - ID: doc.ID, - Synced: true, - }); err != nil { - log.Error("[addDocuments] UpdateDocumentSync DB Error:", err) - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Document"}) - return - } - } // Commit Transaction @@ -416,7 +399,7 @@ func (api *API) checkDocumentsSync(c *gin.Context) { } // Upsert Device - device, err := api.DB.Queries.UpsertDevice(api.DB.Ctx, database.UpsertDeviceParams{ + _, err := api.DB.Queries.UpsertDevice(api.DB.Ctx, database.UpsertDeviceParams{ ID: rCheckDocs.DeviceID, UserID: rUser.(string), DeviceName: rCheckDocs.Device, @@ -431,22 +414,20 @@ func (api *API) checkDocumentsSync(c *gin.Context) { missingDocs := []database.Document{} deletedDocIDs := []string{} - if device.Sync == true { - // Get Missing 
Documents - missingDocs, err = api.DB.Queries.GetMissingDocuments(api.DB.Ctx, rCheckDocs.Have) - if err != nil { - log.Error("[checkDocumentsSync] GetMissingDocuments DB Error", err) - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Request"}) - return - } + // Get Missing Documents + missingDocs, err = api.DB.Queries.GetMissingDocuments(api.DB.Ctx, rCheckDocs.Have) + if err != nil { + log.Error("[checkDocumentsSync] GetMissingDocuments DB Error", err) + c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Request"}) + return + } - // Get Deleted Documents - deletedDocIDs, err = api.DB.Queries.GetDeletedDocuments(api.DB.Ctx, rCheckDocs.Have) - if err != nil { - log.Error("[checkDocumentsSync] GetDeletedDocuments DB Error", err) - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Request"}) - return - } + // Get Deleted Documents + deletedDocIDs, err = api.DB.Queries.GetDeletedDocuments(api.DB.Ctx, rCheckDocs.Have) + if err != nil { + log.Error("[checkDocumentsSync] GetDeletedDocuments DB Error", err) + c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Request"}) + return } // Get Wanted Documents @@ -576,27 +557,26 @@ func (api *API) uploadExistingDocument(c *gin.Context) { return } + // Get Word Count + wordCount, err := metadata.GetWordCount(safePath) + if err != nil { + log.Error("[uploadExistingDocument] Word Count Failure:", err) + c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "File Error"}) + return + } + // Upsert Document if _, err = api.DB.Queries.UpsertDocument(api.DB.Ctx, database.UpsertDocumentParams{ ID: document.ID, Md5: fileHash, Filepath: &fileName, + Words: &wordCount, }); err != nil { log.Error("[uploadExistingDocument] UpsertDocument DB Error:", err) c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Document Error"}) return } - // Update Document Sync Attribute - if _, err = api.DB.Queries.UpdateDocumentSync(api.DB.Ctx, 
database.UpdateDocumentSyncParams{ - ID: document.ID, - Synced: true, - }); err != nil { - log.Error("[uploadExistingDocument] UpdateDocumentSync DB Error:", err) - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "Invalid Document"}) - return - } - c.JSON(http.StatusOK, gin.H{ "status": "ok", }) diff --git a/api/opds-routes.go b/api/opds-routes.go index 6a76506..1ba77f1 100644 --- a/api/opds-routes.go +++ b/api/opds-routes.go @@ -56,7 +56,7 @@ func (api *API) opdsDocuments(c *gin.Context) { fileType := splitFilepath[len(splitFilepath)-1] item := opds.Entry{ - Title: fmt.Sprintf("[%3d%%] %s", int(doc.Percentage.(float64)), *doc.Title), + Title: fmt.Sprintf("[%3d%%] %s", int(doc.Percentage), *doc.Title), Author: []opds.Author{ { Name: *doc.Author, diff --git a/database/manager.go b/database/manager.go index 56afc75..b0f6093 100644 --- a/database/manager.go +++ b/database/manager.go @@ -56,6 +56,10 @@ func NewMgr(c *config.Config) *DBManager { return dbm } +func (dbm *DBManager) Shutdown() error { + return dbm.DB.Close() +} + func (dbm *DBManager) CacheTempTables() error { if _, err := dbm.DB.ExecContext(dbm.Ctx, tsql); err != nil { return err diff --git a/database/manager_test.go b/database/manager_test.go index abf930c..0388c6a 100644 --- a/database/manager_test.go +++ b/database/manager_test.go @@ -122,13 +122,13 @@ func (dt *databaseTest) TestActivity() { // Add Item activity, err := dt.dbm.Queries.AddActivity(dt.dbm.Ctx, AddActivityParams{ - DocumentID: documentID, - DeviceID: deviceID, - UserID: userID, - StartTime: d.UTC().Format(time.RFC3339), - Duration: 60, - Page: counter, - Pages: 100, + DocumentID: documentID, + DeviceID: deviceID, + UserID: userID, + StartTime: d.UTC().Format(time.RFC3339), + Duration: 60, + StartPercentage: float64(counter) / 100.0, + EndPercentage: float64(counter+1) / 100.0, }) // Validate No Error @@ -143,9 +143,7 @@ func (dt *databaseTest) TestActivity() { } // Initiate Cache - if err := dt.dbm.CacheTempTables(); err != 
nil { - t.Fatalf(`Error: %v`, err) - } + dt.dbm.CacheTempTables() // Validate Exists existsRows, err := dt.dbm.Queries.GetActivity(dt.dbm.Ctx, GetActivityParams{ diff --git a/database/models.go b/database/models.go index 3b50be8..9b2f1d8 100644 --- a/database/models.go +++ b/database/models.go @@ -9,14 +9,15 @@ import ( ) type Activity struct { - UserID string `json:"user_id"` - DocumentID string `json:"document_id"` - DeviceID string `json:"device_id"` - CreatedAt string `json:"created_at"` - StartTime string `json:"start_time"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` - Duration int64 `json:"duration"` + ID int64 `json:"id"` + UserID string `json:"user_id"` + DocumentID string `json:"document_id"` + DeviceID string `json:"device_id"` + StartTime string `json:"start_time"` + StartPercentage float64 `json:"start_percentage"` + EndPercentage float64 `json:"end_percentage"` + Duration int64 `json:"duration"` + CreatedAt string `json:"created_at"` } type Device struct { @@ -63,10 +64,8 @@ type DocumentUserStatistic struct { DocumentID string `json:"document_id"` UserID string `json:"user_id"` LastRead string `json:"last_read"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` TotalTimeSeconds int64 `json:"total_time_seconds"` - ReadPages int64 `json:"read_pages"` + ReadPercentage float64 `json:"read_percentage"` Percentage float64 `json:"percentage"` WordsRead int64 `json:"words_read"` Wpm float64 `json:"wpm"` @@ -85,18 +84,6 @@ type Metadatum struct { CreatedAt string `json:"created_at"` } -type RawActivity struct { - ID int64 `json:"id"` - UserID string `json:"user_id"` - DocumentID string `json:"document_id"` - DeviceID string `json:"device_id"` - StartTime string `json:"start_time"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` - Duration int64 `json:"duration"` - CreatedAt string `json:"created_at"` -} - type User struct { ID string `json:"id"` Pass *string `json:"-"` @@ -119,27 +106,14 @@ type UserStreak struct { type 
ViewDocumentUserStatistic struct { DocumentID string `json:"document_id"` UserID string `json:"user_id"` - LastRead string `json:"last_read"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` + LastRead interface{} `json:"last_read"` TotalTimeSeconds sql.NullFloat64 `json:"total_time_seconds"` - ReadPages int64 `json:"read_pages"` + ReadPercentage sql.NullFloat64 `json:"read_percentage"` Percentage float64 `json:"percentage"` WordsRead interface{} `json:"words_read"` Wpm int64 `json:"wpm"` } -type ViewRescaledActivity struct { - UserID string `json:"user_id"` - DocumentID string `json:"document_id"` - DeviceID string `json:"device_id"` - CreatedAt string `json:"created_at"` - StartTime string `json:"start_time"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` - Duration int64 `json:"duration"` -} - type ViewUserStreak struct { UserID string `json:"user_id"` Window string `json:"window"` diff --git a/database/query.sql b/database/query.sql index e235ef0..ef717ae 100644 --- a/database/query.sql +++ b/database/query.sql @@ -1,12 +1,12 @@ -- name: AddActivity :one -INSERT INTO raw_activity ( +INSERT INTO activity ( user_id, document_id, device_id, start_time, duration, - page, - pages + start_percentage, + end_percentage ) VALUES (?, ?, ?, ?, ?, ?, ?) 
RETURNING *; @@ -43,8 +43,7 @@ WITH filtered_activity AS ( user_id, start_time, duration, - page, - pages + ROUND(CAST(end_percentage - start_percentage AS REAL) * 100, 2) AS read_percentage FROM activity WHERE activity.user_id = $user_id @@ -65,8 +64,7 @@ SELECT title, author, duration, - page, - pages + read_percentage FROM filtered_activity AS activity LEFT JOIN documents ON documents.id = activity.document_id LEFT JOIN users ON users.id = activity.user_id; @@ -82,9 +80,9 @@ WITH RECURSIVE last_30_days AS ( ), filtered_activity AS ( SELECT - user_id, + user_id, start_time, - duration + duration FROM activity WHERE start_time > DATE('now', '-31 days') AND activity.user_id = $user_id @@ -142,41 +140,6 @@ ORDER BY devices.last_synced DESC; SELECT * FROM documents WHERE id = $document_id LIMIT 1; --- name: GetDocumentDaysRead :one -WITH document_days AS ( - SELECT DATE(start_time, time_offset) AS dates - FROM activity - JOIN users ON users.id = activity.user_id - WHERE document_id = $document_id - AND user_id = $user_id - GROUP BY dates -) -SELECT CAST(COUNT(*) AS INTEGER) AS days_read -FROM document_days; - --- name: GetDocumentReadStats :one -SELECT - COUNT(DISTINCT page) AS pages_read, - SUM(duration) AS total_time -FROM activity -WHERE document_id = $document_id -AND user_id = $user_id -AND start_time >= $start_time; - --- name: GetDocumentReadStatsCapped :one -WITH capped_stats AS ( - SELECT MIN(SUM(duration), CAST($page_duration_cap AS INTEGER)) AS durations - FROM activity - WHERE document_id = $document_id - AND user_id = $user_id - AND start_time >= $start_time - GROUP BY page -) -SELECT - CAST(COUNT(*) AS INTEGER) AS pages_read, - CAST(SUM(durations) AS INTEGER) AS total_time -FROM capped_stats; - -- name: GetDocumentWithStats :one SELECT docs.id, @@ -189,23 +152,21 @@ SELECT docs.words, CAST(COALESCE(dus.wpm, 0.0) AS INTEGER) AS wpm, - COALESCE(dus.page, 0) AS page, - COALESCE(dus.pages, 0) AS pages, - COALESCE(dus.read_pages, 0) AS read_pages, + 
COALESCE(dus.read_percentage, 0) AS read_percentage, COALESCE(dus.total_time_seconds, 0) AS total_time_seconds, STRFTIME('%Y-%m-%d %H:%M:%S', COALESCE(dus.last_read, "1970-01-01"), users.time_offset) AS last_read, - CASE - WHEN dus.percentage > 97.0 THEN 100.0 + ROUND(CAST(CASE WHEN dus.percentage IS NULL THEN 0.0 - ELSE dus.percentage - END AS percentage, + WHEN (dus.percentage * 100.0) > 97.0 THEN 100.0 + ELSE dus.percentage * 100.0 + END AS REAL), 2) AS percentage, CAST(CASE WHEN dus.total_time_seconds IS NULL THEN 0.0 ELSE - CAST(dus.total_time_seconds AS REAL) - / CAST(dus.read_pages AS REAL) - END AS INTEGER) AS seconds_per_page + CAST(dus.total_time_seconds AS REAL) + / (dus.read_percentage * 100.0) + END AS INTEGER) AS seconds_per_percent FROM documents AS docs LEFT JOIN users ON users.id = $user_id LEFT JOIN @@ -233,25 +194,24 @@ SELECT docs.words, CAST(COALESCE(dus.wpm, 0.0) AS INTEGER) AS wpm, - COALESCE(dus.page, 0) AS page, - COALESCE(dus.pages, 0) AS pages, - COALESCE(dus.read_pages, 0) AS read_pages, + COALESCE(dus.read_percentage, 0) AS read_percentage, COALESCE(dus.total_time_seconds, 0) AS total_time_seconds, STRFTIME('%Y-%m-%d %H:%M:%S', COALESCE(dus.last_read, "1970-01-01"), users.time_offset) AS last_read, - CASE - WHEN dus.percentage > 97.0 THEN 100.0 + ROUND(CAST(CASE WHEN dus.percentage IS NULL THEN 0.0 - ELSE dus.percentage - END AS percentage, + WHEN (dus.percentage * 100.0) > 97.0 THEN 100.0 + ELSE dus.percentage * 100.0 + END AS REAL), 2) AS percentage, + CASE WHEN dus.total_time_seconds IS NULL THEN 0.0 ELSE ROUND( CAST(dus.total_time_seconds AS REAL) - / CAST(dus.read_pages AS REAL) + / (dus.read_percentage * 100.0) ) - END AS seconds_per_page + END AS seconds_per_percent FROM documents AS docs LEFT JOIN users ON users.id = $user_id LEFT JOIN @@ -298,20 +258,6 @@ WHERE id = $user_id LIMIT 1; SELECT * FROM user_streaks WHERE user_id = $user_id; --- name: GetUsers :many -SELECT * FROM users -WHERE - users.id = $user - OR ?1 IN ( - SELECT 
id - FROM users - WHERE id = $user - AND admin = 1 - ) -ORDER BY created_at DESC -LIMIT $limit -OFFSET $offset; - -- name: GetWPMLeaderboard :many SELECT user_id, @@ -328,35 +274,18 @@ ORDER BY wpm DESC; SELECT CAST(value AS TEXT) AS id, CAST((documents.filepath IS NULL) AS BOOLEAN) AS want_file, - CAST((IFNULL(documents.synced, false) != true) AS BOOLEAN) AS want_metadata + CAST((documents.id IS NULL) AS BOOLEAN) AS want_metadata FROM json_each(?1) LEFT JOIN documents ON value = documents.id WHERE ( documents.id IS NOT NULL AND documents.deleted = false - AND ( - documents.synced = false - OR documents.filepath IS NULL - ) + AND documents.filepath IS NULL ) OR (documents.id IS NULL) OR CAST($document_ids AS TEXT) != CAST($document_ids AS TEXT); --- name: UpdateDocumentDeleted :one -UPDATE documents -SET - deleted = $deleted -WHERE id = $id -RETURNING *; - --- name: UpdateDocumentSync :one -UPDATE documents -SET - synced = $synced -WHERE id = $id -RETURNING *; - -- name: UpdateProgress :one INSERT OR REPLACE INTO document_progress ( user_id, diff --git a/database/query.sql.go b/database/query.sql.go index f5cce04..794d48a 100644 --- a/database/query.sql.go +++ b/database/query.sql.go @@ -7,53 +7,52 @@ package database import ( "context" - "database/sql" "strings" ) const addActivity = `-- name: AddActivity :one -INSERT INTO raw_activity ( +INSERT INTO activity ( user_id, document_id, device_id, start_time, duration, - page, - pages + start_percentage, + end_percentage ) VALUES (?, ?, ?, ?, ?, ?, ?) 
-RETURNING id, user_id, document_id, device_id, start_time, page, pages, duration, created_at +RETURNING id, user_id, document_id, device_id, start_time, start_percentage, end_percentage, duration, created_at ` type AddActivityParams struct { - UserID string `json:"user_id"` - DocumentID string `json:"document_id"` - DeviceID string `json:"device_id"` - StartTime string `json:"start_time"` - Duration int64 `json:"duration"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` + UserID string `json:"user_id"` + DocumentID string `json:"document_id"` + DeviceID string `json:"device_id"` + StartTime string `json:"start_time"` + Duration int64 `json:"duration"` + StartPercentage float64 `json:"start_percentage"` + EndPercentage float64 `json:"end_percentage"` } -func (q *Queries) AddActivity(ctx context.Context, arg AddActivityParams) (RawActivity, error) { +func (q *Queries) AddActivity(ctx context.Context, arg AddActivityParams) (Activity, error) { row := q.db.QueryRowContext(ctx, addActivity, arg.UserID, arg.DocumentID, arg.DeviceID, arg.StartTime, arg.Duration, - arg.Page, - arg.Pages, + arg.StartPercentage, + arg.EndPercentage, ) - var i RawActivity + var i Activity err := row.Scan( &i.ID, &i.UserID, &i.DocumentID, &i.DeviceID, &i.StartTime, - &i.Page, - &i.Pages, + &i.StartPercentage, + &i.EndPercentage, &i.Duration, &i.CreatedAt, ) @@ -154,8 +153,7 @@ WITH filtered_activity AS ( user_id, start_time, duration, - page, - pages + ROUND(CAST(end_percentage - start_percentage AS REAL) * 100, 2) AS read_percentage FROM activity WHERE activity.user_id = ?1 @@ -176,8 +174,7 @@ SELECT title, author, duration, - page, - pages + read_percentage FROM filtered_activity AS activity LEFT JOIN documents ON documents.id = activity.document_id LEFT JOIN users ON users.id = activity.user_id @@ -192,13 +189,12 @@ type GetActivityParams struct { } type GetActivityRow struct { - DocumentID string `json:"document_id"` - StartTime string `json:"start_time"` - Title *string 
`json:"title"` - Author *string `json:"author"` - Duration int64 `json:"duration"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` + DocumentID string `json:"document_id"` + StartTime string `json:"start_time"` + Title *string `json:"title"` + Author *string `json:"author"` + Duration int64 `json:"duration"` + ReadPercentage float64 `json:"read_percentage"` } func (q *Queries) GetActivity(ctx context.Context, arg GetActivityParams) ([]GetActivityRow, error) { @@ -222,8 +218,7 @@ func (q *Queries) GetActivity(ctx context.Context, arg GetActivityParams) ([]Get &i.Title, &i.Author, &i.Duration, - &i.Page, - &i.Pages, + &i.ReadPercentage, ); err != nil { return nil, err } @@ -249,9 +244,9 @@ WITH RECURSIVE last_30_days AS ( ), filtered_activity AS ( SELECT - user_id, + user_id, start_time, - duration + duration FROM activity WHERE start_time > DATE('now', '-31 days') AND activity.user_id = ?1 @@ -465,98 +460,6 @@ func (q *Queries) GetDocument(ctx context.Context, documentID string) (Document, return i, err } -const getDocumentDaysRead = `-- name: GetDocumentDaysRead :one -WITH document_days AS ( - SELECT DATE(start_time, time_offset) AS dates - FROM activity - JOIN users ON users.id = activity.user_id - WHERE document_id = ?1 - AND user_id = ?2 - GROUP BY dates -) -SELECT CAST(COUNT(*) AS INTEGER) AS days_read -FROM document_days -` - -type GetDocumentDaysReadParams struct { - DocumentID string `json:"document_id"` - UserID string `json:"user_id"` -} - -func (q *Queries) GetDocumentDaysRead(ctx context.Context, arg GetDocumentDaysReadParams) (int64, error) { - row := q.db.QueryRowContext(ctx, getDocumentDaysRead, arg.DocumentID, arg.UserID) - var days_read int64 - err := row.Scan(&days_read) - return days_read, err -} - -const getDocumentReadStats = `-- name: GetDocumentReadStats :one -SELECT - COUNT(DISTINCT page) AS pages_read, - SUM(duration) AS total_time -FROM activity -WHERE document_id = ?1 -AND user_id = ?2 -AND start_time >= ?3 -` - -type 
GetDocumentReadStatsParams struct { - DocumentID string `json:"document_id"` - UserID string `json:"user_id"` - StartTime string `json:"start_time"` -} - -type GetDocumentReadStatsRow struct { - PagesRead int64 `json:"pages_read"` - TotalTime sql.NullFloat64 `json:"total_time"` -} - -func (q *Queries) GetDocumentReadStats(ctx context.Context, arg GetDocumentReadStatsParams) (GetDocumentReadStatsRow, error) { - row := q.db.QueryRowContext(ctx, getDocumentReadStats, arg.DocumentID, arg.UserID, arg.StartTime) - var i GetDocumentReadStatsRow - err := row.Scan(&i.PagesRead, &i.TotalTime) - return i, err -} - -const getDocumentReadStatsCapped = `-- name: GetDocumentReadStatsCapped :one -WITH capped_stats AS ( - SELECT MIN(SUM(duration), CAST(?1 AS INTEGER)) AS durations - FROM activity - WHERE document_id = ?2 - AND user_id = ?3 - AND start_time >= ?4 - GROUP BY page -) -SELECT - CAST(COUNT(*) AS INTEGER) AS pages_read, - CAST(SUM(durations) AS INTEGER) AS total_time -FROM capped_stats -` - -type GetDocumentReadStatsCappedParams struct { - PageDurationCap int64 `json:"page_duration_cap"` - DocumentID string `json:"document_id"` - UserID string `json:"user_id"` - StartTime string `json:"start_time"` -} - -type GetDocumentReadStatsCappedRow struct { - PagesRead int64 `json:"pages_read"` - TotalTime int64 `json:"total_time"` -} - -func (q *Queries) GetDocumentReadStatsCapped(ctx context.Context, arg GetDocumentReadStatsCappedParams) (GetDocumentReadStatsCappedRow, error) { - row := q.db.QueryRowContext(ctx, getDocumentReadStatsCapped, - arg.PageDurationCap, - arg.DocumentID, - arg.UserID, - arg.StartTime, - ) - var i GetDocumentReadStatsCappedRow - err := row.Scan(&i.PagesRead, &i.TotalTime) - return i, err -} - const getDocumentWithStats = `-- name: GetDocumentWithStats :one SELECT docs.id, @@ -569,23 +472,21 @@ SELECT docs.words, CAST(COALESCE(dus.wpm, 0.0) AS INTEGER) AS wpm, - COALESCE(dus.page, 0) AS page, - COALESCE(dus.pages, 0) AS pages, - COALESCE(dus.read_pages, 
0) AS read_pages, + COALESCE(dus.read_percentage, 0) AS read_percentage, COALESCE(dus.total_time_seconds, 0) AS total_time_seconds, STRFTIME('%Y-%m-%d %H:%M:%S', COALESCE(dus.last_read, "1970-01-01"), users.time_offset) AS last_read, - CASE - WHEN dus.percentage > 97.0 THEN 100.0 + ROUND(CAST(CASE WHEN dus.percentage IS NULL THEN 0.0 - ELSE dus.percentage - END AS percentage, + WHEN (dus.percentage * 100.0) > 97.0 THEN 100.0 + ELSE dus.percentage * 100.0 + END AS REAL), 2) AS percentage, CAST(CASE WHEN dus.total_time_seconds IS NULL THEN 0.0 ELSE - CAST(dus.total_time_seconds AS REAL) - / CAST(dus.read_pages AS REAL) - END AS INTEGER) AS seconds_per_page + CAST(dus.total_time_seconds AS REAL) + / (dus.read_percentage * 100.0) + END AS INTEGER) AS seconds_per_percent FROM documents AS docs LEFT JOIN users ON users.id = ?1 LEFT JOIN @@ -602,22 +503,20 @@ type GetDocumentWithStatsParams struct { } type GetDocumentWithStatsRow struct { - ID string `json:"id"` - Title *string `json:"title"` - Author *string `json:"author"` - Description *string `json:"description"` - Isbn10 *string `json:"isbn10"` - Isbn13 *string `json:"isbn13"` - Filepath *string `json:"filepath"` - Words *int64 `json:"words"` - Wpm int64 `json:"wpm"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` - ReadPages int64 `json:"read_pages"` - TotalTimeSeconds int64 `json:"total_time_seconds"` - LastRead interface{} `json:"last_read"` - Percentage interface{} `json:"percentage"` - SecondsPerPage int64 `json:"seconds_per_page"` + ID string `json:"id"` + Title *string `json:"title"` + Author *string `json:"author"` + Description *string `json:"description"` + Isbn10 *string `json:"isbn10"` + Isbn13 *string `json:"isbn13"` + Filepath *string `json:"filepath"` + Words *int64 `json:"words"` + Wpm int64 `json:"wpm"` + ReadPercentage float64 `json:"read_percentage"` + TotalTimeSeconds int64 `json:"total_time_seconds"` + LastRead interface{} `json:"last_read"` + Percentage float64 `json:"percentage"` + 
SecondsPerPercent int64 `json:"seconds_per_percent"` } func (q *Queries) GetDocumentWithStats(ctx context.Context, arg GetDocumentWithStatsParams) (GetDocumentWithStatsRow, error) { @@ -633,13 +532,11 @@ func (q *Queries) GetDocumentWithStats(ctx context.Context, arg GetDocumentWithS &i.Filepath, &i.Words, &i.Wpm, - &i.Page, - &i.Pages, - &i.ReadPages, + &i.ReadPercentage, &i.TotalTimeSeconds, &i.LastRead, &i.Percentage, - &i.SecondsPerPage, + &i.SecondsPerPercent, ) return i, err } @@ -711,25 +608,24 @@ SELECT docs.words, CAST(COALESCE(dus.wpm, 0.0) AS INTEGER) AS wpm, - COALESCE(dus.page, 0) AS page, - COALESCE(dus.pages, 0) AS pages, - COALESCE(dus.read_pages, 0) AS read_pages, + COALESCE(dus.read_percentage, 0) AS read_percentage, COALESCE(dus.total_time_seconds, 0) AS total_time_seconds, STRFTIME('%Y-%m-%d %H:%M:%S', COALESCE(dus.last_read, "1970-01-01"), users.time_offset) AS last_read, - CASE - WHEN dus.percentage > 97.0 THEN 100.0 + ROUND(CAST(CASE WHEN dus.percentage IS NULL THEN 0.0 - ELSE dus.percentage - END AS percentage, + WHEN (dus.percentage * 100.0) > 97.0 THEN 100.0 + ELSE dus.percentage * 100.0 + END AS REAL), 2) AS percentage, + CASE WHEN dus.total_time_seconds IS NULL THEN 0.0 ELSE ROUND( CAST(dus.total_time_seconds AS REAL) - / CAST(dus.read_pages AS REAL) + / (dus.read_percentage * 100.0) ) - END AS seconds_per_page + END AS seconds_per_percent FROM documents AS docs LEFT JOIN users ON users.id = ?1 LEFT JOIN @@ -748,22 +644,20 @@ type GetDocumentsWithStatsParams struct { } type GetDocumentsWithStatsRow struct { - ID string `json:"id"` - Title *string `json:"title"` - Author *string `json:"author"` - Description *string `json:"description"` - Isbn10 *string `json:"isbn10"` - Isbn13 *string `json:"isbn13"` - Filepath *string `json:"filepath"` - Words *int64 `json:"words"` - Wpm int64 `json:"wpm"` - Page int64 `json:"page"` - Pages int64 `json:"pages"` - ReadPages int64 `json:"read_pages"` - TotalTimeSeconds int64 `json:"total_time_seconds"` - 
LastRead interface{} `json:"last_read"` - Percentage interface{} `json:"percentage"` - SecondsPerPage interface{} `json:"seconds_per_page"` + ID string `json:"id"` + Title *string `json:"title"` + Author *string `json:"author"` + Description *string `json:"description"` + Isbn10 *string `json:"isbn10"` + Isbn13 *string `json:"isbn13"` + Filepath *string `json:"filepath"` + Words *int64 `json:"words"` + Wpm int64 `json:"wpm"` + ReadPercentage float64 `json:"read_percentage"` + TotalTimeSeconds int64 `json:"total_time_seconds"` + LastRead interface{} `json:"last_read"` + Percentage float64 `json:"percentage"` + SecondsPerPercent interface{} `json:"seconds_per_percent"` } func (q *Queries) GetDocumentsWithStats(ctx context.Context, arg GetDocumentsWithStatsParams) ([]GetDocumentsWithStatsRow, error) { @@ -785,13 +679,11 @@ func (q *Queries) GetDocumentsWithStats(ctx context.Context, arg GetDocumentsWit &i.Filepath, &i.Words, &i.Wpm, - &i.Page, - &i.Pages, - &i.ReadPages, + &i.ReadPercentage, &i.TotalTimeSeconds, &i.LastRead, &i.Percentage, - &i.SecondsPerPage, + &i.SecondsPerPercent, ); err != nil { return nil, err } @@ -987,56 +879,6 @@ func (q *Queries) GetUserStreaks(ctx context.Context, userID string) ([]UserStre return items, nil } -const getUsers = `-- name: GetUsers :many -SELECT id, pass, admin, time_offset, created_at FROM users -WHERE - users.id = ?1 - OR ?1 IN ( - SELECT id - FROM users - WHERE id = ?1 - AND admin = 1 - ) -ORDER BY created_at DESC -LIMIT ?3 -OFFSET ?2 -` - -type GetUsersParams struct { - User string `json:"user"` - Offset int64 `json:"offset"` - Limit int64 `json:"limit"` -} - -func (q *Queries) GetUsers(ctx context.Context, arg GetUsersParams) ([]User, error) { - rows, err := q.db.QueryContext(ctx, getUsers, arg.User, arg.Offset, arg.Limit) - if err != nil { - return nil, err - } - defer rows.Close() - var items []User - for rows.Next() { - var i User - if err := rows.Scan( - &i.ID, - &i.Pass, - &i.Admin, - &i.TimeOffset, - &i.CreatedAt, - 
); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - const getWPMLeaderboard = `-- name: GetWPMLeaderboard :many SELECT user_id, @@ -1089,17 +931,14 @@ const getWantedDocuments = `-- name: GetWantedDocuments :many SELECT CAST(value AS TEXT) AS id, CAST((documents.filepath IS NULL) AS BOOLEAN) AS want_file, - CAST((IFNULL(documents.synced, false) != true) AS BOOLEAN) AS want_metadata + CAST((documents.id IS NULL) AS BOOLEAN) AS want_metadata FROM json_each(?1) LEFT JOIN documents ON value = documents.id WHERE ( documents.id IS NOT NULL AND documents.deleted = false - AND ( - documents.synced = false - OR documents.filepath IS NULL - ) + AND documents.filepath IS NULL ) OR (documents.id IS NULL) OR CAST(?1 AS TEXT) != CAST(?1 AS TEXT) @@ -1134,86 +973,6 @@ func (q *Queries) GetWantedDocuments(ctx context.Context, documentIds string) ([ return items, nil } -const updateDocumentDeleted = `-- name: UpdateDocumentDeleted :one -UPDATE documents -SET - deleted = ?1 -WHERE id = ?2 -RETURNING id, md5, filepath, coverfile, title, author, series, series_index, lang, description, words, gbid, olid, isbn10, isbn13, synced, deleted, updated_at, created_at -` - -type UpdateDocumentDeletedParams struct { - Deleted bool `json:"-"` - ID string `json:"id"` -} - -func (q *Queries) UpdateDocumentDeleted(ctx context.Context, arg UpdateDocumentDeletedParams) (Document, error) { - row := q.db.QueryRowContext(ctx, updateDocumentDeleted, arg.Deleted, arg.ID) - var i Document - err := row.Scan( - &i.ID, - &i.Md5, - &i.Filepath, - &i.Coverfile, - &i.Title, - &i.Author, - &i.Series, - &i.SeriesIndex, - &i.Lang, - &i.Description, - &i.Words, - &i.Gbid, - &i.Olid, - &i.Isbn10, - &i.Isbn13, - &i.Synced, - &i.Deleted, - &i.UpdatedAt, - &i.CreatedAt, - ) - return i, err -} - -const updateDocumentSync = `-- name: UpdateDocumentSync :one 
-UPDATE documents -SET - synced = ?1 -WHERE id = ?2 -RETURNING id, md5, filepath, coverfile, title, author, series, series_index, lang, description, words, gbid, olid, isbn10, isbn13, synced, deleted, updated_at, created_at -` - -type UpdateDocumentSyncParams struct { - Synced bool `json:"-"` - ID string `json:"id"` -} - -func (q *Queries) UpdateDocumentSync(ctx context.Context, arg UpdateDocumentSyncParams) (Document, error) { - row := q.db.QueryRowContext(ctx, updateDocumentSync, arg.Synced, arg.ID) - var i Document - err := row.Scan( - &i.ID, - &i.Md5, - &i.Filepath, - &i.Coverfile, - &i.Title, - &i.Author, - &i.Series, - &i.SeriesIndex, - &i.Lang, - &i.Description, - &i.Words, - &i.Gbid, - &i.Olid, - &i.Isbn10, - &i.Isbn13, - &i.Synced, - &i.Deleted, - &i.UpdatedAt, - &i.CreatedAt, - ) - return i, err -} - const updateProgress = `-- name: UpdateProgress :one INSERT OR REPLACE INTO document_progress ( user_id, diff --git a/database/schema.sql b/database/schema.sql index aa66442..39cf70c 100644 --- a/database/schema.sql +++ b/database/schema.sql @@ -91,16 +91,17 @@ CREATE TABLE IF NOT EXISTS document_progress ( PRIMARY KEY (user_id, document_id, device_id) ); --- Raw Read Activity -CREATE TABLE IF NOT EXISTS raw_activity ( +-- Read Activity +CREATE TABLE IF NOT EXISTS activity ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_id TEXT NOT NULL, document_id TEXT NOT NULL, device_id TEXT NOT NULL, start_time DATETIME NOT NULL, - page INTEGER NOT NULL, - pages INTEGER NOT NULL, + start_percentage REAL NOT NULL, + end_percentage REAL NOT NULL, + duration INTEGER NOT NULL, created_at DATETIME NOT NULL DEFAULT (STRFTIME('%Y-%m-%dT%H:%M:%SZ', 'now')), @@ -113,19 +114,6 @@ CREATE TABLE IF NOT EXISTS raw_activity ( ----------------------- Temporary Tables ---------------------- --------------------------------------------------------------- --- Temporary Activity Table (Cached from View) -CREATE TEMPORARY TABLE IF NOT EXISTS activity ( - user_id TEXT NOT NULL, - document_id 
TEXT NOT NULL, - device_id TEXT NOT NULL, - - created_at DATETIME NOT NULL, - start_time DATETIME NOT NULL, - page INTEGER NOT NULL, - pages INTEGER NOT NULL, - duration INTEGER NOT NULL -); - -- Temporary User Streaks Table (Cached from View) CREATE TEMPORARY TABLE IF NOT EXISTS user_streaks ( user_id TEXT NOT NULL, @@ -144,10 +132,8 @@ CREATE TEMPORARY TABLE IF NOT EXISTS document_user_statistics ( document_id TEXT NOT NULL, user_id TEXT NOT NULL, last_read TEXT NOT NULL, - page INTEGER NOT NULL, - pages INTEGER NOT NULL, total_time_seconds INTEGER NOT NULL, - read_pages INTEGER NOT NULL, + read_percentage REAL NOT NULL, percentage REAL NOT NULL, words_read INTEGER NOT NULL, wpm REAL NOT NULL @@ -158,9 +144,9 @@ CREATE TEMPORARY TABLE IF NOT EXISTS document_user_statistics ( --------------------------- Indexes --------------------------- --------------------------------------------------------------- -CREATE INDEX IF NOT EXISTS temp.activity_start_time ON activity (start_time); -CREATE INDEX IF NOT EXISTS temp.activity_user_id ON activity (user_id); -CREATE INDEX IF NOT EXISTS temp.activity_user_id_document_id ON activity ( +CREATE INDEX IF NOT EXISTS activity_start_time ON activity (start_time); +CREATE INDEX IF NOT EXISTS activity_user_id ON activity (user_id); +CREATE INDEX IF NOT EXISTS activity_user_id_document_id ON activity ( user_id, document_id ); @@ -169,100 +155,6 @@ CREATE INDEX IF NOT EXISTS temp.activity_user_id_document_id ON activity ( ---------------------------- Views ---------------------------- --------------------------------------------------------------- --------------------------------- -------- Rescaled Activity ------ --------------------------------- - -CREATE VIEW IF NOT EXISTS view_rescaled_activity AS - -WITH RECURSIVE nums (idx) AS ( - SELECT 1 AS idx - UNION ALL - SELECT idx + 1 - FROM nums - LIMIT 1000 -), - -current_pages AS ( - SELECT - document_id, - user_id, - pages - FROM raw_activity - GROUP BY document_id, user_id - HAVING 
MAX(start_time) - ORDER BY start_time DESC -), - -intermediate AS ( - SELECT - raw_activity.document_id, - raw_activity.device_id, - raw_activity.user_id, - raw_activity.created_at, - raw_activity.start_time, - raw_activity.duration, - raw_activity.page, - current_pages.pages, - - -- Derive first page - ((raw_activity.page - 1) * current_pages.pages) / raw_activity.pages - + 1 AS first_page, - - -- Derive last page - MAX( - ((raw_activity.page - 1) * current_pages.pages) - / raw_activity.pages - + 1, - (raw_activity.page * current_pages.pages) / raw_activity.pages - ) AS last_page - - FROM raw_activity - INNER JOIN current_pages ON - current_pages.document_id = raw_activity.document_id - AND current_pages.user_id = raw_activity.user_id -), - -num_limit AS ( - SELECT * FROM nums - LIMIT (SELECT MAX(last_page - first_page + 1) FROM intermediate) -), - -rescaled_raw AS ( - SELECT - intermediate.document_id, - intermediate.device_id, - intermediate.user_id, - intermediate.created_at, - intermediate.start_time, - intermediate.last_page, - intermediate.pages, - intermediate.first_page + num_limit.idx - 1 AS page, - intermediate.duration / ( - intermediate.last_page - intermediate.first_page + 1.0 - ) AS duration - FROM intermediate - LEFT JOIN num_limit ON - num_limit.idx <= (intermediate.last_page - intermediate.first_page + 1) -) - -SELECT - user_id, - document_id, - device_id, - created_at, - start_time, - page, - pages, - - -- Round up if last page (maintains total duration) - CAST(CASE - WHEN page = last_page AND duration != CAST(duration AS INTEGER) - THEN duration + 1 - ELSE duration - END AS INTEGER) AS duration -FROM rescaled_raw; - -------------------------------- --------- User Streaks --------- -------------------------------- @@ -279,7 +171,7 @@ WITH document_windows AS ( 'weekday 0', '-7 day' ) AS weekly_read, DATE(activity.start_time, users.time_offset) AS daily_read - FROM raw_activity AS activity + FROM activity LEFT JOIN users ON users.id = 
activity.user_id GROUP BY activity.user_id, weekly_read, daily_read ), @@ -387,38 +279,84 @@ LEFT JOIN current_streak ON CREATE VIEW IF NOT EXISTS view_document_user_statistics AS -WITH true_progress AS ( +WITH intermediate_ga AS ( + SELECT + ga1.id AS row_id, + ga1.user_id, + ga1.document_id, + ga1.duration, + ga1.start_time, + ga1.start_percentage, + ga1.end_percentage, + + -- Find Overlapping Events (Assign Unique ID) + ( + SELECT MIN(id) + FROM activity AS ga2 + WHERE + ga1.document_id = ga2.document_id + AND ga1.user_id = ga2.user_id + AND ga1.start_percentage <= ga2.end_percentage + AND ga1.end_percentage >= ga2.start_percentage + ) AS group_leader + FROM activity AS ga1 +), + +grouped_activity AS ( SELECT - document_id, user_id, - start_time AS last_read, - page, - pages, - SUM(duration) AS total_time_seconds, + document_id, + MAX(start_time) AS start_time, + MIN(start_percentage) AS start_percentage, + MAX(end_percentage) AS end_percentage, + MAX(end_percentage) - MIN(start_percentage) AS read_percentage, + SUM(duration) AS duration + FROM intermediate_ga + GROUP BY group_leader +), - -- Determine Read Pages - COUNT(DISTINCT page) AS read_pages, - - -- Derive Percentage of Book - ROUND(CAST(page AS REAL) / CAST(pages AS REAL) * 100, 2) AS percentage - FROM view_rescaled_activity - GROUP BY document_id, user_id +current_progress AS ( + SELECT + user_id, + document_id, + COALESCE(( + SELECT percentage + FROM document_progress AS dp + WHERE + dp.user_id = iga.user_id + AND dp.document_id = iga.document_id + ), end_percentage) AS percentage + FROM intermediate_ga AS iga + GROUP BY user_id, document_id HAVING MAX(start_time) ) + SELECT - true_progress.*, - (CAST(COALESCE(documents.words, 0.0) AS REAL) / pages * read_pages) + ga.document_id, + ga.user_id, + MAX(start_time) AS last_read, + SUM(duration) AS total_time_seconds, + SUM(read_percentage) AS read_percentage, + cp.percentage, + + (CAST(COALESCE(d.words, 0.0) AS REAL) * SUM(read_percentage)) AS words_read, 
- (CAST(COALESCE(documents.words, 0.0) AS REAL) / pages * read_pages) - / (total_time_seconds / 60.0) AS wpm -FROM true_progress -INNER JOIN documents ON documents.id = true_progress.document_id + + (CAST(COALESCE(d.words, 0.0) AS REAL) * SUM(read_percentage)) + / (SUM(duration) / 60.0) AS wpm +FROM grouped_activity AS ga +INNER JOIN + current_progress AS cp + ON ga.user_id = cp.user_id AND ga.document_id = cp.document_id +INNER JOIN + documents AS d + ON d.id = ga.document_id +GROUP BY ga.document_id, ga.user_id ORDER BY wpm DESC; --------------------------------------------------------------- ------------------ Populate Temporary Tables ------------------ --------------------------------------------------------------- -INSERT INTO activity SELECT * FROM view_rescaled_activity; INSERT INTO user_streaks SELECT * FROM view_user_streaks; INSERT INTO document_user_statistics SELECT * FROM view_document_user_statistics; diff --git a/database/update_temp_tables.sql b/database/update_temp_tables.sql index f63964e..f583714 100644 --- a/database/update_temp_tables.sql +++ b/database/update_temp_tables.sql @@ -1,5 +1,3 @@ -DELETE FROM activity; -INSERT INTO activity SELECT * FROM view_rescaled_activity; DELETE FROM user_streaks; INSERT INTO user_streaks SELECT * FROM view_user_streaks; DELETE FROM document_user_statistics; diff --git a/main.go b/main.go index 4e2e2b0..536249d 100644 --- a/main.go +++ b/main.go @@ -3,6 +3,8 @@ package main import ( "os" "os/signal" + "sync" + "syscall" log "github.com/sirupsen/logrus" "github.com/urfave/cli/v2" @@ -22,13 +24,13 @@ func main() { log.SetFormatter(UTCFormatter{&log.TextFormatter{FullTimestamp: true}}) app := &cli.App{ - Name: "Book Bank", + Name: "Book Manager", Usage: "A self hosted e-book progress tracker.", Commands: []*cli.Command{ { Name: "serve", Aliases: []string{"s"}, - Usage: "Start Book Bank web server.", + Usage: "Start Book Manager web server.", Action: cmdServer, }, }, @@ -40,17 +42,23 @@ func main() { } func 
cmdServer(ctx *cli.Context) error { - log.Info("Starting Book Bank Server") + log.Info("Starting Book Manager Server") + + // Create Channel + wg := sync.WaitGroup{} + done := make(chan struct{}) + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM) + + // Start Server server := server.NewServer() - server.StartServer() + server.StartServer(&wg, done) - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - <-c + // Wait & Close + <-interrupt + server.StopServer(&wg, done) - log.Info("Stopping Server") - server.StopServer() - log.Info("Server Stopped") + // Stop Server os.Exit(0) return nil diff --git a/server/server.go b/server/server.go index 2124bf9..d8a49d7 100644 --- a/server/server.go +++ b/server/server.go @@ -5,6 +5,7 @@ import ( "net/http" "os" "path/filepath" + "sync" "time" log "github.com/sirupsen/logrus" @@ -29,35 +30,72 @@ func NewServer() *Server { // Create Paths docDir := filepath.Join(c.DataPath, "documents") coversDir := filepath.Join(c.DataPath, "covers") - _ = os.Mkdir(docDir, os.ModePerm) - _ = os.Mkdir(coversDir, os.ModePerm) + os.Mkdir(docDir, os.ModePerm) + os.Mkdir(coversDir, os.ModePerm) return &Server{ API: api, Config: c, Database: db, + httpServer: &http.Server{ + Handler: api.Router, + Addr: (":" + c.ListenPort), + }, } } -func (s *Server) StartServer() { - listenAddr := (":" + s.Config.ListenPort) +func (s *Server) StartServer(wg *sync.WaitGroup, done <-chan struct{}) { + ticker := time.NewTicker(15 * time.Minute) - s.httpServer = &http.Server{ - Handler: s.API.Router, - Addr: listenAddr, - } + wg.Add(2) go func() { + defer wg.Done() + err := s.httpServer.ListenAndServe() - if err != nil { - log.Error("Error starting server ", err) + if err != nil && err != http.ErrServerClosed { + log.Error("Error Starting Server:", err) + } + }() + + go func() { + defer wg.Done() + defer ticker.Stop() + + s.RunScheduledTasks() + + for { + select { + case <-ticker.C: + s.RunScheduledTasks() + case 
<-done: + log.Info("Stopping Task Runner...") + return + } } }() } -func (s *Server) StopServer() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - s.httpServer.Shutdown(ctx) - s.API.DB.DB.Close() +func (s *Server) RunScheduledTasks() { + log.Info("[RunScheduledTasks] Refreshing Temp Table Cache") + if err := s.API.DB.CacheTempTables(); err != nil { + log.Warn("[RunScheduledTasks] Refreshing Temp Table Cache Failure:", err) + } + log.Info("[RunScheduledTasks] Refreshing Temp Table Success") +} + +func (s *Server) StopServer(wg *sync.WaitGroup, done chan<- struct{}) { + log.Info("Stopping HTTP Server...") + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := s.httpServer.Shutdown(ctx); err != nil { + log.Info("Shutting Error") + } + s.API.DB.Shutdown() + + close(done) + wg.Wait() + + log.Info("Server Stopped") } diff --git a/templates/activity.html b/templates/activity.html index 69cf0fe..6371288 100644 --- a/templates/activity.html +++ b/templates/activity.html @@ -28,7 +28,7 @@ scope="col" class="p-3 font-normal text-left uppercase border-b border-gray-200 dark:border-gray-800" > - Page + Percent @@ -51,7 +51,7 @@
{{ $activity.Duration }}
-{{ $activity.Page }} / {{ $activity.Pages }}
+{{ $activity.ReadPercentage }}%
-Seconds / Page
+Seconds / Percent
- {{ .Data.SecondsPerPage }}
+ {{ .Data.SecondsPerPercent }}
Progress
- {{ .Data.Page }} / {{ .Data.Pages }} ({{ .Data.Percentage }}%)
+ {{ .Data.Percentage }}%