```
├── .dockerignore
├── .env.example (omitted)
├── .github/
   ├── FUNDING.yml
   ├── ISSUE_TEMPLATE/
      ├── bug_report.yml (600 tokens)
      ├── config.yml
      ├── feature_request.yml (400 tokens)
   ├── PULL_REQUEST_TEMPLATE.md (200 tokens)
   ├── workflows/
      ├── docker-nightly.yml (400 tokens)
      ├── docker-publish.yml (900 tokens)
      ├── integration.yml (800 tokens)
      ├── nightly.yml (800 tokens)
      ├── pr-checks.yml (600 tokens)
      ├── security.yml (700 tokens)
├── .gitignore (400 tokens)
├── CHANGELOG.md (22.3k tokens)
├── CONTRIBUTING.md (300 tokens)
├── Dockerfile (4.8k tokens)
├── LICENSE (omitted)
├── README.md (8.5k tokens)
├── assets/
   ├── screenshots/
      ├── deezer-browse.png
      ├── desktop-album.png
      ├── desktop-artist.png
      ├── desktop-audiobooks.png
      ├── desktop-home.png
      ├── desktop-library.png
      ├── desktop-player.png
      ├── desktop-podcasts.png
      ├── desktop-settings.png
      ├── mobile-album.png
      ├── mobile-artist.png
      ├── mobile-audiobooks.png
      ├── mobile-home.png
      ├── mobile-library.png
      ├── mobile-login.png
      ├── mobile-player.png
      ├── mobile-podcasts.png
      ├── mood-mixer.png
      ├── reddit post/
         ├── desktop-home.png
         ├── desktop-podcasts.png
         ├── mobile-home.png
         ├── mobile-player.png
         ├── vibe-overlay.png
      ├── spotify-import-preview.png
      ├── vibe-blend.png
      ├── vibe-drift.png
      ├── vibe-galaxy.png
      ├── vibe-map.png
      ├── vibe-overlay.png
├── backend/
   ├── .dockerignore (100 tokens)
   ├── .gitignore (100 tokens)
   ├── Dockerfile (400 tokens)
   ├── docker-entrypoint.sh (600 tokens)
   ├── healthcheck.js (100 tokens)
   ├── jest.config.js (100 tokens)
   ├── migrate-safe.sh (500 tokens)
   ├── migrations/
      ├── audio_analysis_cleanup_fix.sql (200 tokens)
   ├── package-lock.json (80.3k tokens)
   ├── package.json (500 tokens)
   ├── prisma/
      ├── migrations/
         ├── 20250101000000_rename_soulseek_fallback/
            ├── migration.sql (100 tokens)
         ├── 20250102000000_add_user_token_version/
            ├── migration.sql (100 tokens)
         ├── 20250102000001_add_downloadjob_targetmbid_index/
            ├── migration.sql (100 tokens)
         ├── 20251130000000_init/
            ├── migration.sql (7.6k tokens)
         ├── 20251225000000_add_missing_track_updated_at/
            ├── migration.sql (100 tokens)
         ├── 20251225100000_add_similar_artists_json/
            ├── migration.sql
         ├── 20251226000000_add_mood_bucket_system/
            ├── migration.sql (500 tokens)
         ├── 20251229004706_add_enrichment_concurrency/
            ├── migration.sql
         ├── 20251229043907_add_metadata_overrides/
            ├── migration.sql (200 tokens)
         ├── 20251230000000_add_podcast_audiobook_search_vectors/
            ├── migration.sql (1200 tokens)
         ├── 20251230234224_add_enrichment_and_overrides/
            ├── migration.sql (200 tokens)
         ├── 20251231041041_add_original_year_to_album/
            ├── migration.sql
         ├── 20260101152925_add_lidarr_webhook_secret/
            ├── migration.sql
         ├── 20260102142537_add_analysis_started_at/
            ├── migration.sql
         ├── 20260102150000_add_audio_analyzer_workers/
            ├── migration.sql
         ├── 20260103045951_add_lastfm_api_key/
            ├── migration.sql
         ├── 20260104000000_add_soulseek_concurrent_downloads/
            ├── migration.sql
         ├── 20260107000000_add_download_source_columns/
            ├── migration.sql (100 tokens)
         ├── 20260118000000_add_partial_unique_index_active_downloads/
            ├── migration.sql (100 tokens)
         ├── 20260123181610_add_artist_counts_and_indexes/
            ├── migration.sql (100 tokens)
         ├── 20260127000000_add_pgvector/
            ├── migration.sql (200 tokens)
         ├── 20260128000000_add_clap_workers/
            ├── migration.sql
         ├── 20260128100000_reduce_embedding_dimension/
            ├── migration.sql (200 tokens)
         ├── 20260130000000_add_similarity_functions/
            ├── migration.sql (600 tokens)
         ├── 20260204100000_add_vibe_analysis_fields/
            ├── migration.sql (100 tokens)
         ├── 20260207000000_add_music_search_vector_triggers/
            ├── migration.sql (500 tokens)
         ├── 20260214115252_add_download_job_unique_constraint/
            ├── migration.sql (100 tokens)
         ├── 20260214121222_add_webhook_events/
            ├── migration.sql (200 tokens)
         ├── 20260214145320_standardize_integration_config/
            ├── migration.sql (100 tokens)
         ├── 20260214_add_discovery_batch_version/
            ├── migration.sql
         ├── 20260221000000_add_public_url/
            ├── migration.sql
         ├── 20260221184459_add_subsonic_listen_source/
            ├── migration.sql
         ├── 20260222000000_add_album_title_index/
            ├── migration.sql
         ├── 20260222100000_nullable_download_job_target_mbid/
            ├── migration.sql
         ├── 20260222110000_add_enrichment_status_indexes/
            ├── migration.sql (100 tokens)
         ├── 20260223000000_lastfm_tags_default_empty_array/
            ├── migration.sql (100 tokens)
         ├── 20260226000000_add_registration_open_setting/
            ├── migration.sql
         ├── 20260227000000_add_lidarr_profile_settings/
            ├── migration.sql
         ├── 20260227100000_add_track_lyrics/
            ├── migration.sql (100 tokens)
         ├── 20260302000000_add_share_links/
            ├── migration.sql (200 tokens)
         ├── 20260303000000_add_podcast_cache_headers/
            ├── migration.sql
         ├── 20260304000000_add_source_url_to_import_job/
            ├── migration.sql
         ├── 20260305000000_add_isrc_to_track_and_pending/
            ├── migration.sql
         ├── 20260305000000_add_missing_from_disk_to_pending_track/
            ├── migration.sql
         ├── 20260305000000_add_subsonic_bookmarks/
            ├── migration.sql (200 tokens)
         ├── 20260305000000_add_subsonic_play_queue/
            ├── migration.sql (100 tokens)
         ├── 20260305000001_add_corrupt_flag_to_track/
            ├── migration.sql
         ├── 20260307000000_switch_ivfflat_to_hnsw/
            ├── migration.sql (100 tokens)
         ├── 20260309000000_add_scan_status/
            ├── migration.sql
         ├── 20260401000000_add_disc_number_to_track/
            ├── migration.sql
         ├── migration_lock.toml
      ├── schema.prisma (5.9k tokens)
   ├── scripts/
      ├── backfill-original-year.ts (900 tokens)
      ├── create-test-user.ts (100 tokens)
      ├── duplicateLibrary.ts (700 tokens)
      ├── generateVibeVocabulary.ts (700 tokens)
      ├── recalibrateAnalysis.ts (500 tokens)
   ├── seeds/
      ├── createUser.ts (200 tokens)
   ├── src/
      ├── __mocks__/
         ├── p-queue.cjs (100 tokens)
         ├── test-env.cjs (100 tokens)
      ├── config.ts (700 tokens)
      ├── data/
         ├── featureProfiles.ts (6.1k tokens)
         ├── vibe-vocabulary.json (520.9k tokens)
      ├── index.ts (4.6k tokens)
      ├── jobs/
         ├── __tests__/
            ├── manual-webhook-reconciliation-test.md (400 tokens)
         ├── queueCleaner.ts (4.9k tokens)
         ├── webhookReconciliation.ts (1600 tokens)
      ├── lib/
         ├── soulseek/
            ├── client.ts (4.8k tokens)
            ├── common.ts
            ├── downloads.ts (500 tokens)
            ├── index.ts
            ├── listen.ts (300 tokens)
            ├── messages/
               ├── common.ts (100 tokens)
               ├── from/
                  ├── index.ts
                  ├── peer-init.ts (200 tokens)
                  ├── peer.ts (1000 tokens)
                  ├── server.ts (800 tokens)
               ├── index.ts
               ├── message-builder.ts (200 tokens)
               ├── message-parser.ts (300 tokens)
               ├── message-stream.ts (200 tokens)
               ├── to/
                  ├── index.ts
                  ├── peer.ts (700 tokens)
                  ├── server.ts (500 tokens)
            ├── peer.ts (500 tokens)
            ├── server.ts (400 tokens)
            ├── utils/
               ├── types.ts (100 tokens)
      ├── middleware/
         ├── __tests__/
            ├── auth.test.ts (2.3k tokens)
         ├── auth.ts (1300 tokens)
         ├── errorHandler.ts (300 tokens)
         ├── rateLimiter.ts (600 tokens)
         ├── subsonicAuth.ts (1000 tokens)
      ├── routes/
         ├── __tests__/
            ├── auth.route.test.ts (2.3k tokens)
            ├── library-albums.route.test.ts (2.5k tokens)
            ├── playlists.route.test.ts (2.1k tokens)
            ├── share.route.test.ts (2.5k tokens)
            ├── webhooks.route.test.ts (2.3k tokens)
         ├── analysis.ts (4.8k tokens)
         ├── apiKeys.ts (700 tokens)
         ├── artists.ts (4.8k tokens)
         ├── audiobooks.ts (6.2k tokens)
         ├── auth.ts (3.5k tokens)
         ├── browse.ts (2.2k tokens)
         ├── deviceLink.ts (1300 tokens)
         ├── discover.ts (19.2k tokens)
         ├── downloads.ts (5.6k tokens)
         ├── enrichment.ts (5.6k tokens)
         ├── events.ts (500 tokens)
         ├── eventsTicket.ts (100 tokens)
         ├── homepage.ts (1300 tokens)
         ├── library.ts (100 tokens)
         ├── library/
            ├── albums.ts (1300 tokens)
            ├── artists.ts (6.5k tokens)
            ├── backfill.ts (300 tokens)
            ├── coverArt.ts (3.4k tokens)
            ├── index.ts (200 tokens)
            ├── scan.ts (700 tokens)
            ├── streaming.ts (1200 tokens)
            ├── tracks.ts (11.2k tokens)
         ├── listeningState.ts (600 tokens)
         ├── mixes.ts (3.8k tokens)
         ├── notifications.ts (5.9k tokens)
         ├── offline.ts (1800 tokens)
         ├── onboarding.ts (3.9k tokens)
         ├── playbackState.ts (1100 tokens)
         ├── playlists.ts (8.7k tokens)
         ├── plays.ts (500 tokens)
         ├── podcasts.ts (11.9k tokens)
         ├── recommendations.ts (3.2k tokens)
         ├── releases.ts (1700 tokens)
         ├── search.ts (2.1k tokens)
         ├── settings.ts (600 tokens)
         ├── share.ts (3.1k tokens)
         ├── soulseek.ts (3.6k tokens)
         ├── spotify.ts (3k tokens)
         ├── subsonic/
            ├── artistInfo.ts (600 tokens)
            ├── compat.ts (1300 tokens)
            ├── index.ts (1100 tokens)
            ├── library.ts (5.8k tokens)
            ├── lyrics.ts (900 tokens)
            ├── mappers.ts (1200 tokens)
            ├── playback.ts (3.8k tokens)
            ├── playlists.ts (2k tokens)
            ├── podcasts.ts (1000 tokens)
            ├── profile.ts (200 tokens)
            ├── queue.ts (1500 tokens)
            ├── search.ts (3.4k tokens)
            ├── starred.ts (800 tokens)
            ├── userHelpers.ts (200 tokens)
            ├── userManagement.ts (1300 tokens)
         ├── system.ts (100 tokens)
         ├── systemSettings.ts (5.4k tokens)
         ├── vibe.ts (3.2k tokens)
         ├── webhooks.ts (1900 tokens)
      ├── services/
         ├── __tests__/
            ├── README.md (200 tokens)
            ├── audioAnalysisCleanup.test.ts (2.1k tokens)
            ├── audioScanValidator.test.ts (800 tokens)
            ├── audioStreaming.test.ts (800 tokens)
            ├── coverRepair.test.ts (800 tokens)
            ├── enrichmentFailureService.test.ts (900 tokens)
            ├── imageStorage.test.ts (300 tokens)
            ├── m3uParser.test.ts (500 tokens)
            ├── musicPathConfig.test.ts (1200 tokens)
            ├── musicScanner.test.ts (2.4k tokens)
            ├── search.test.ts (1200 tokens)
            ├── soulseek-search-strategies.test.ts (600 tokens)
            ├── webhookEventStore.test.ts (1500 tokens)
         ├── acquisitionService.ts (7.4k tokens)
         ├── artistCountsService.ts (1100 tokens)
         ├── audioAnalysisCleanup.ts (2.3k tokens)
         ├── audioScanValidator.ts (400 tokens)
         ├── audioStreaming.ts (3.6k tokens)
         ├── audiobookCache.ts (3k tokens)
         ├── audiobookshelf.ts (3.3k tokens)
         ├── coverArt.ts (500 tokens)
         ├── coverArtExtractor.ts (400 tokens)
         ├── dataCache.ts (2.8k tokens)
         ├── deezer.ts (5.3k tokens)
         ├── discoverWeekly.ts (25.2k tokens)
         ├── discovery/
            ├── __tests__/
               ├── discoveryAlbumLifecycle.test.ts (3.4k tokens)
               ├── discoveryBatchLogger.test.ts (1100 tokens)
               ├── discoverySeeding.test.ts (3.8k tokens)
            ├── discoveryAlbumLifecycle.ts (1400 tokens)
            ├── discoveryBatchLogger.ts (400 tokens)
            ├── discoverySeeding.ts (2.2k tokens)
            ├── index.ts (100 tokens)
            ├── optimisticBatchUpdate.ts (800 tokens)
         ├── discoveryLogger.ts (1200 tokens)
         ├── downloadQueue.ts (5k tokens)
         ├── enrichment.ts (3.8k tokens)
         ├── enrichmentFailureService.ts (2.5k tokens)
         ├── enrichmentState.ts (2.1k tokens)
         ├── eventBus.ts (300 tokens)
         ├── fanart.ts (1500 tokens)
         ├── featureDetection.ts (800 tokens)
         ├── hybridSimilarity.ts (1500 tokens)
         ├── imageBackfill.ts (2.3k tokens)
         ├── imageProvider.ts (2.5k tokens)
         ├── imageStorage.ts (1500 tokens)
         ├── itunes.ts (2.2k tokens)
         ├── lastfm.ts (7.1k tokens)
         ├── lidarr.ts (22k tokens)
         ├── lrclib.ts (300 tokens)
         ├── m3uParser.ts (400 tokens)
         ├── mixes/
            ├── discoveryMixes.ts (1600 tokens)
            ├── genreMixes.ts (2.9k tokens)
            ├── helpers.ts (1800 tokens)
            ├── index.ts (2.5k tokens)
            ├── moodMixes.ts (12.2k tokens)
            ├── timeMixes.ts (900 tokens)
         ├── moodBucketService.ts (4.7k tokens)
         ├── musicScanner.ts (8.9k tokens)
         ├── musicbrainz.ts (7.2k tokens)
         ├── notificationPolicyService.ts (2.7k tokens)
         ├── notificationService.ts (1300 tokens)
         ├── podcastCache.ts (1600 tokens)
         ├── podcastDownload.ts (3.3k tokens)
         ├── podcastindex.ts (300 tokens)
         ├── programmaticPlaylists.ts
         ├── rateLimiter.ts (2.3k tokens)
         ├── rss-parser.ts (2.3k tokens)
         ├── search.ts (5.1k tokens)
         ├── simpleDownloadManager.ts (19k tokens)
         ├── songPath.ts (1700 tokens)
         ├── songlink.ts (800 tokens)
         ├── soulseek-search-strategies.ts (2.1k tokens)
         ├── soulseek.ts (16.9k tokens)
         ├── spotify.ts (11.1k tokens)
         ├── spotifyImport.ts (25.2k tokens)
         ├── staleJobCleanup.ts (1400 tokens)
         ├── textEmbeddingBridge.ts (400 tokens)
         ├── trackIdentity.ts (1100 tokens)
         ├── umapProjection.ts (2.8k tokens)
         ├── vibeVocabulary.ts (2k tokens)
         ├── webhookEventStore.ts (1200 tokens)
         ├── wikidata.ts (1000 tokens)
         ├── ytdlp.ts (800 tokens)
      ├── types/
         ├── express-session.d.ts (omitted)
         ├── lidarr.ts (900 tokens)
         ├── soulseek.ts (200 tokens)
      ├── utils/
         ├── artistNormalization.ts (2.7k tokens)
         ├── async.ts (300 tokens)
         ├── cacheWrapper.ts (400 tokens)
         ├── colorExtractor.ts (1500 tokens)
         ├── configValidator.ts (1100 tokens)
         ├── dateFilters.ts (300 tokens)
         ├── db.ts (300 tokens)
         ├── distributedLock.ts (300 tokens)
         ├── embedding.ts (100 tokens)
         ├── encryption.ts (900 tokens)
         ├── envWriter.ts (700 tokens)
         ├── errors.ts (600 tokens)
         ├── fuzzyMatch.ts (500 tokens)
         ├── logger.ts (200 tokens)
         ├── metadataOverrides.ts (300 tokens)
         ├── metrics.ts (600 tokens)
         ├── normalization.ts (400 tokens)
         ├── normalize.ts
         ├── playlistLogger.ts (1600 tokens)
         ├── queryMonitor.ts (200 tokens)
         ├── rangeParser.ts (300 tokens)
         ├── redis.ts (100 tokens)
         ├── shuffle.ts (100 tokens)
         ├── ssrf.ts (400 tokens)
         ├── subsonicResponse.ts (500 tokens)
         ├── systemSettings.ts (500 tokens)
      ├── workers/
         ├── __tests__/
            ├── dataCleanup.test.ts (1000 tokens)
            ├── enrichmentStateMachine.test.ts (3.5k tokens)
         ├── artistEnrichment.ts (3.8k tokens)
         ├── artistEnrichmentWorker.ts (600 tokens)
         ├── audioCompletionSubscriber.ts (1100 tokens)
         ├── dataCleanup.ts (900 tokens)
         ├── dataIntegrity.ts (3k tokens)
         ├── discoverCron.ts (500 tokens)
         ├── enrichmentQueues.ts (400 tokens)
         ├── index.ts (2.7k tokens)
         ├── moodBucketWorker.ts (700 tokens)
         ├── organizeSingles.ts (1800 tokens)
         ├── podcastEnrichmentWorker.ts (400 tokens)
         ├── processors/
            ├── discoverProcessor.ts (500 tokens)
            ├── importProcessor.ts (200 tokens)
            ├── scanProcessor.ts (5k tokens)
         ├── queues.ts (300 tokens)
         ├── trackEnrichmentWorker.ts (500 tokens)
         ├── umapWorker.ts (100 tokens)
         ├── unifiedEnrichment.ts (13.4k tokens)
   ├── tsconfig.json (100 tokens)
   ├── tsconfig.production.json
├── deploy.sh (400 tokens)
├── docker-compose.dev.yml (400 tokens)
├── docker-compose.prod.yml (400 tokens)
├── docker-compose.server.yml (1000 tokens)
├── docker-compose.services.yml (600 tokens)
├── docker-compose.yml (2.5k tokens)
├── frontend/
   ├── .dockerignore (100 tokens)
   ├── .gitignore (100 tokens)
   ├── Dockerfile (500 tokens)
   ├── app/
      ├── album/
         ├── [id]/
            ├── page.tsx (2.3k tokens)
      ├── api/
         ├── events/
            ├── route.ts (400 tokens)
            ├── ticket/
               ├── route.ts (300 tokens)
      ├── artist/
         ├── [id]/
            ├── loading.tsx
            ├── page.tsx (2.2k tokens)
      ├── audiobooks/
         ├── [id]/
            ├── page.tsx (1400 tokens)
         ├── loading.tsx
         ├── page.tsx (6.4k tokens)
         ├── series/
            ├── [name]/
               ├── page.tsx (3.6k tokens)
      ├── browse/
         ├── playlists/
            ├── [id]/
               ├── page.tsx (5.1k tokens)
            ├── page.tsx (6.8k tokens)
      ├── collection/
         ├── loading.tsx
         ├── page.tsx (3.7k tokens)
      ├── device/
         ├── page.tsx (3.8k tokens)
      ├── discover/
         ├── page.tsx (2.9k tokens)
      ├── error.tsx (100 tokens)
      ├── favicon.ico
      ├── global-error.tsx (200 tokens)
      ├── globals.css (3k tokens)
      ├── import/
         ├── playlist/
            ├── page.tsx (11.8k tokens)
         ├── spotify/
            ├── page.tsx (100 tokens)
      ├── layout.tsx (800 tokens)
      ├── library/
         ├── page.tsx
      ├── loading.tsx
      ├── login/
         ├── page.tsx (3.6k tokens)
      ├── manifest.webmanifest/
         ├── route.ts (600 tokens)
      ├── mix/
         ├── [id]/
            ├── page.tsx (4k tokens)
      ├── onboarding/
         ├── page.tsx (9.1k tokens)
      ├── page.tsx (1700 tokens)
      ├── playlist/
         ├── [id]/
            ├── page.tsx (12.3k tokens)
      ├── playlists/
         ├── loading.tsx
         ├── page.tsx (7.9k tokens)
      ├── podcasts/
         ├── [id]/
            ├── page.tsx (1800 tokens)
         ├── genre/
            ├── [genreId]/
               ├── page.tsx (1900 tokens)
         ├── loading.tsx
         ├── page.tsx (6.9k tokens)
      ├── queue/
         ├── loading.tsx
         ├── page.tsx (3.9k tokens)
      ├── radio/
         ├── page.tsx (2.8k tokens)
      ├── register/
         ├── page.tsx (900 tokens)
      ├── releases/
         ├── page.tsx (2.4k tokens)
      ├── search/
         ├── loading.tsx
         ├── page.tsx (3.7k tokens)
      ├── settings/
         ├── loading.tsx
         ├── page.tsx (1900 tokens)
      ├── setup/
         ├── page.tsx (300 tokens)
      ├── share/
         ├── [token]/
            ├── SharePageClient.tsx (4.5k tokens)
            ├── layout.tsx (100 tokens)
            ├── page.tsx (300 tokens)
      ├── sync/
         ├── page.tsx (2.6k tokens)
      ├── vibe/
         ├── page.tsx (3.7k tokens)
   ├── assets/
      ├── SpotIcon.png
      ├── icon.png
   ├── components/
      ├── MetadataEditor.tsx (4.4k tokens)
      ├── MixCard.tsx (900 tokens)
      ├── MoodMixer.tsx (2.5k tokens)
      ├── ServiceWorkerRegistration.tsx (500 tokens)
      ├── activity/
         ├── ActiveDownloadsTab.tsx (1800 tokens)
         ├── DiscoverSettingsTab.tsx (2.2k tokens)
         ├── HistoryTab.tsx (2.2k tokens)
         ├── ImportsTab.tsx (2.4k tokens)
         ├── NotificationsTab.tsx (2.5k tokens)
      ├── cards/
         ├── MediaCard.tsx (800 tokens)
      ├── layout/
         ├── ActivityPanel.tsx (2.6k tokens)
         ├── AuthenticatedLayout.tsx (1700 tokens)
         ├── BottomNavigation.tsx (700 tokens)
         ├── MobileSidebar.tsx (1800 tokens)
         ├── Sidebar.tsx (4.9k tokens)
         ├── TVLayout.tsx (2.5k tokens)
         ├── TopBar.tsx (3.5k tokens)
         ├── UnifiedPanel.tsx (900 tokens)
      ├── lyrics/
         ├── LyricsPanel.tsx (700 tokens)
         ├── MobileLyricsView.tsx (600 tokens)
      ├── player/
         ├── FullPlayer.tsx (5.9k tokens)
         ├── KeyboardShortcutsTooltip.tsx (600 tokens)
         ├── MediaControlsHandler.tsx (100 tokens)
         ├── MiniPlayer.tsx (6.7k tokens)
         ├── OverlayPlayer.tsx (4.1k tokens)
         ├── PlayerModeWrapper.tsx (100 tokens)
         ├── SeekSlider.tsx (1900 tokens)
         ├── SleepTimer.tsx (1200 tokens)
         ├── UniversalPlayer.tsx (400 tokens)
      ├── providers/
         ├── AudioErrorBoundary.tsx (200 tokens)
         ├── ConditionalAudioProvider.tsx (500 tokens)
         ├── GlobalErrorBoundary.tsx (500 tokens)
      ├── ui/
         ├── AudiobookCard.tsx (900 tokens)
         ├── Badge.tsx (200 tokens)
         ├── Button.tsx (400 tokens)
         ├── CachedImage.tsx (200 tokens)
         ├── Card.tsx (200 tokens)
         ├── ConfirmDialog.tsx (800 tokens)
         ├── EmptyState.tsx (200 tokens)
         ├── FormElements.tsx (500 tokens)
         ├── GalaxyBackground.tsx (500 tokens)
         ├── GradientSpinner.tsx (400 tokens)
         ├── HorizontalCarousel.tsx (1100 tokens)
         ├── InlineStatus.tsx (1200 tokens)
         ├── LoadingScreen.tsx (100 tokens)
         ├── Modal.tsx (400 tokens)
         ├── OperationConfirmToast.tsx (500 tokens)
         ├── PlayableCard.tsx (1600 tokens)
         ├── PlaylistSelector.tsx (2.6k tokens)
   ├── docker-entrypoint.sh (200 tokens)
   ├── eslint.config.mjs (100 tokens)
   ├── features/
      ├── album/
         ├── components/
            ├── AlbumActionBar.tsx (800 tokens)
            ├── AlbumHero.tsx (1700 tokens)
            ├── SimilarAlbums.tsx (300 tokens)
            ├── TrackList.tsx (2.5k tokens)
         ├── hooks/
            ├── useAlbumActions.ts (1000 tokens)
            ├── useAlbumData.ts (300 tokens)
         ├── types.ts (300 tokens)
      ├── artist/
         ├── components/
            ├── ArtistActionBar.tsx (700 tokens)
            ├── ArtistBio.tsx (100 tokens)
            ├── ArtistHero.tsx (1700 tokens)
            ├── AvailableAlbums.tsx (1200 tokens)
            ├── Discography.tsx (600 tokens)
            ├── PopularTracks.tsx (2.1k tokens)
            ├── SimilarArtists.tsx (1000 tokens)
            ├── index.ts (100 tokens)
         ├── hooks/
            ├── index.ts
            ├── useArtistActions.ts (800 tokens)
            ├── useArtistData.ts (600 tokens)
            ├── useDownloadActions.ts (500 tokens)
         ├── types.ts (400 tokens)
      ├── audiobook/
         ├── components/
            ├── AudiobookActionBar.tsx (900 tokens)
            ├── AudiobookHero.tsx (1700 tokens)
            ├── ChapterList.tsx (400 tokens)
         ├── hooks/
            ├── useAudiobookActions.ts (800 tokens)
            ├── useAudiobookData.ts (400 tokens)
         ├── types.ts (300 tokens)
      ├── discover/
         ├── components/
            ├── DiscoverActionBar.tsx (500 tokens)
            ├── DiscoverHero.tsx (900 tokens)
            ├── HowItWorks.tsx (500 tokens)
            ├── TrackList.tsx (2000 tokens)
            ├── UnavailableAlbums.tsx (1500 tokens)
         ├── constants.ts (100 tokens)
         ├── hooks/
            ├── useDiscoverActions.ts (900 tokens)
            ├── useDiscoverData.ts (1000 tokens)
            ├── usePreviewPlayer.ts (1000 tokens)
         ├── types.ts (200 tokens)
      ├── home/
         ├── components/
            ├── ArtistsGrid.tsx (700 tokens)
            ├── AudiobooksGrid.tsx (600 tokens)
            ├── ContinueListening.tsx (1100 tokens)
            ├── FeaturedPlaylistsGrid.tsx (1100 tokens)
            ├── HomeHero.tsx (300 tokens)
            ├── LibraryRadioStations.tsx (2000 tokens)
            ├── MixesGrid.tsx (100 tokens)
            ├── PodcastsGrid.tsx (700 tokens)
            ├── PopularArtistsGrid.tsx (600 tokens)
            ├── SectionHeader.tsx (400 tokens)
         ├── hooks/
            ├── useHomeData.ts (1000 tokens)
         ├── radioData.ts (2.3k tokens)
         ├── types.ts (300 tokens)
      ├── library/
         ├── components/
            ├── AlbumsGrid.tsx (1300 tokens)
            ├── ArtistsGrid.tsx (1300 tokens)
            ├── LibraryHeader.tsx (300 tokens)
            ├── LibraryTabs.tsx (500 tokens)
            ├── LibraryToolbar.tsx (700 tokens)
            ├── TracksList.tsx (2.3k tokens)
         ├── hooks/
            ├── useLibraryActions.ts (900 tokens)
         ├── types.ts (200 tokens)
      ├── podcast/
         ├── components/
            ├── ContinueListening.tsx (1400 tokens)
            ├── EpisodeList.tsx (2.3k tokens)
            ├── PodcastActionBar.tsx (1100 tokens)
            ├── PodcastHero.tsx (1500 tokens)
            ├── PreviewEpisodes.tsx (1400 tokens)
            ├── SimilarPodcasts.tsx (800 tokens)
         ├── hooks/
            ├── usePodcastActions.ts (1100 tokens)
            ├── usePodcastData.ts (1000 tokens)
         ├── types.ts (200 tokens)
         ├── utils.ts
      ├── search/
         ├── components/
            ├── AliasResolutionBanner.tsx (100 tokens)
            ├── EmptyState.tsx (100 tokens)
            ├── LibraryAlbumsGrid.tsx (300 tokens)
            ├── LibraryAudiobooksGrid.tsx (300 tokens)
            ├── LibraryPodcastsGrid.tsx (400 tokens)
            ├── LibraryTracksList.tsx (1300 tokens)
            ├── SearchFilters.tsx (1000 tokens)
            ├── SimilarArtistsGrid.tsx (400 tokens)
            ├── SoulseekBrowser.tsx (3.9k tokens)
            ├── TVSearchInput.tsx (600 tokens)
            ├── TopResult.tsx (1000 tokens)
            ├── UnifiedSongsList.tsx (1900 tokens)
            ├── soulseekHelpers.tsx (300 tokens)
         ├── hooks/
            ├── useSearchData.ts (400 tokens)
            ├── useSoulseekSearch.ts (1400 tokens)
         ├── types.ts (500 tokens)
      ├── settings/
         ├── components/
            ├── sections/
               ├── AIServicesSection.tsx (1200 tokens)
               ├── AccountSection.tsx (3.1k tokens)
               ├── AudiobookshelfSection.tsx (800 tokens)
               ├── CacheSection.tsx (11.5k tokens)
               ├── CorruptTracksSection.tsx (1100 tokens)
               ├── DownloadPreferencesSection.tsx (1000 tokens)
               ├── LidarrSection.tsx (1300 tokens)
               ├── PlaybackSection.tsx (400 tokens)
               ├── SoulseekSection.tsx (800 tokens)
               ├── StoragePathsSection.tsx (400 tokens)
               ├── SubsonicSection.tsx (1900 tokens)
               ├── UserManagementSection.tsx (2000 tokens)
            ├── ui/
               ├── SettingsInput.tsx (400 tokens)
               ├── SettingsLayout.tsx (900 tokens)
               ├── SettingsRow.tsx (200 tokens)
               ├── SettingsSection.tsx (300 tokens)
               ├── SettingsSelect.tsx (300 tokens)
               ├── SettingsSidebar.tsx (500 tokens)
               ├── SettingsToggle.tsx (200 tokens)
               ├── index.ts (100 tokens)
         ├── hooks/
            ├── useSettingsData.ts (400 tokens)
            ├── useSystemSettings.ts (1200 tokens)
            ├── useTwoFactor.ts (900 tokens)
         ├── types.ts (300 tokens)
      ├── vibe/
         ├── ActivityIconBar.tsx (400 tokens)
         ├── VibeAlchemy.tsx (2.5k tokens)
         ├── VibeMap.tsx (3.5k tokens)
         ├── VibePanelSheet.tsx (800 tokens)
         ├── VibeSongPath.tsx (1200 tokens)
         ├── VibeToolbar.tsx (800 tokens)
         ├── mapUtils.ts (1200 tokens)
         ├── panel-shared.tsx (600 tokens)
         ├── scenes/
            ├── GravityGridScene.tsx (9.1k tokens)
         ├── tabs/
            ├── LyricsTab.tsx (500 tokens)
            ├── NowPlayingTab.tsx (500 tokens)
            ├── QueueTab.tsx (1300 tokens)
         ├── types.ts (200 tokens)
         ├── useVibeMap.ts (900 tokens)
   ├── healthcheck.js (100 tokens)
   ├── hooks/
      ├── useActivityPanel.ts (200 tokens)
      ├── useDoubleTap.ts (500 tokens)
      ├── useEventSource.ts (2.1k tokens)
      ├── useImageColor.ts (2.6k tokens)
      ├── useImportToasts.ts (200 tokens)
      ├── useKeyboardShortcuts.ts (1000 tokens)
      ├── useLyricsSync.ts (700 tokens)
      ├── useLyricsToggle.tsx (400 tokens)
      ├── useMediaInfo.ts (600 tokens)
      ├── useMediaQuery.ts (200 tokens)
      ├── useMediaSession.ts (1900 tokens)
      ├── useMetadataDisplay.ts (700 tokens)
      ├── useNotifications.ts (1200 tokens)
      ├── usePlaybackProgress.ts (300 tokens)
      ├── usePlayerMode.ts (300 tokens)
      ├── useQueries.ts (5.2k tokens)
      ├── useSleepTimer.ts (600 tokens)
      ├── useTVNavigation.ts (2.2k tokens)
      ├── useTrackFormat.ts (100 tokens)
      ├── useTrackPreview.ts (1400 tokens)
      ├── useVibeToggle.ts (300 tokens)
   ├── lib/
      ├── activity-panel-settings-context.tsx (300 tokens)
      ├── api.ts (11.2k tokens)
      ├── audio-context.tsx (100 tokens)
      ├── audio-controller-context.tsx (100 tokens)
      ├── audio-controller.ts (3.7k tokens)
      ├── audio-controls-context.tsx (10.7k tokens)
      ├── audio-hooks.tsx (600 tokens)
      ├── audio-playback-context.tsx (1500 tokens)
      ├── audio-state-context.tsx (5.5k tokens)
      ├── auth-context.tsx (1000 tokens)
      ├── download-context.tsx (1300 tokens)
      ├── download-progress-context.tsx (500 tokens)
      ├── enrichmentApi.ts (1100 tokens)
      ├── features-context.tsx (400 tokens)
      ├── format.ts (100 tokens)
      ├── lyrics-utils.ts (200 tokens)
      ├── query-client.tsx (600 tokens)
      ├── query-events.ts (400 tokens)
      ├── search-result-store.ts (300 tokens)
      ├── toast-context.tsx (1000 tokens)
      ├── track-format.ts (800 tokens)
      ├── tv-utils.ts (400 tokens)
      ├── version.ts (100 tokens)
   ├── next.config.ts (900 tokens)
   ├── package-lock.json (79.5k tokens)
   ├── package.json (400 tokens)
   ├── playwright.config.ts (200 tokens)
   ├── postcss.config.mjs
   ├── public/
      ├── assets/
         ├── icons/
            ├── icon-128.webp
            ├── icon-192.webp
            ├── icon-256.webp
            ├── icon-48.webp
            ├── icon-512.webp
            ├── icon-72.webp
            ├── icon-96.webp
         ├── images/
            ├── SpotIcon.png
            ├── apple-touch-icon.png
            ├── favicon-192.png
            ├── kima.webp
      ├── fonts/
         ├── montserrat-latin.woff2
      ├── offline.html (300 tokens)
      ├── sw.js (1200 tokens)
   ├── tests/
      ├── e2e/
         ├── enrichment-cycle.spec.ts (1500 tokens)
         ├── fixtures/
            ├── test-helpers.ts (1100 tokens)
         ├── full-ux-audit.spec.ts (6.9k tokens)
         ├── global.setup.ts (500 tokens)
         ├── import-url.spec.ts (700 tokens)
         ├── playlists.spec.ts (2.2k tokens)
         ├── predeploy/
            ├── auth.spec.ts (400 tokens)
            ├── integrations.spec.ts (1000 tokens)
            ├── library.spec.ts (500 tokens)
            ├── playback.spec.ts (1200 tokens)
         ├── queue.spec.ts (1400 tokens)
         ├── security.spec.ts (2.9k tokens)
         ├── smoke.spec.ts (400 tokens)
         ├── vibe.spec.ts (2.4k tokens)
   ├── tsconfig.json (100 tokens)
   ├── utils/
      ├── cn.ts
      ├── formatNumber.ts
      ├── formatTime.ts (400 tokens)
      ├── shuffle.ts (100 tokens)
      ├── vibeMatchScore.ts (400 tokens)
├── healthcheck-prod.js (100 tokens)
├── package-lock.json (omitted)
├── package.json
├── scripts/
   ├── create-e2e-user.sh (500 tokens)
   ├── dev-setup.sh (200 tokens)
   ├── run-enrichment-memory-test.sh (900 tokens)
   ├── take-screenshots.js (2k tokens)
├── services/
   ├── audio-analyzer-clap/
      ├── Dockerfile (300 tokens)
      ├── analyzer.py (8.3k tokens)
      ├── requirements.txt (100 tokens)
   ├── audio-analyzer/
      ├── Dockerfile (700 tokens)
      ├── analyzer.py (17.3k tokens)
      ├── requirements.txt
      ├── test_analyzer.py (500 tokens)
```


## /.dockerignore

```dockerignore path="/.dockerignore" 
.git
.worktrees
.claude
.serena
.aider-desk
.aider.tags.cache.v4
.roo
.ruff_cache
.vscode
context_portal
logs
docs
scripts
*.md
*.log
**/node_modules
**/.next
.env*
coverage
**/*.test.ts
**/__tests__
android
ios

```

## /.github/FUNDING.yml

```yml path="/.github/FUNDING.yml" 
github: Chevron7Locked
ko_fi: Chevron7Locked
#custom: ["https://example.com/donate"]

```

## /.github/ISSUE_TEMPLATE/bug_report.yml

```yml path="/.github/ISSUE_TEMPLATE/bug_report.yml" 
name: Bug Report
description: Report a bug or unexpected behavior
title: "[Bug]: "
labels: ["bug", "needs triage"]
body:
    - type: markdown
      attributes:
          value: |
              Thanks for taking the time to report a bug. Please fill out the information below to help us diagnose and fix the issue.

    - type: textarea
      id: description
      attributes:
          label: Bug Description
          description: A clear and concise description of what the bug is.
          placeholder: Describe the bug...
      validations:
          required: true

    - type: textarea
      id: reproduction
      attributes:
          label: Steps to Reproduce
          description: Step-by-step instructions to reproduce the behavior.
          placeholder: |
              1. Go to '...'
              2. Click on '...'
              3. Scroll down to '...'
              4. See error
      validations:
          required: true

    - type: textarea
      id: expected
      attributes:
          label: Expected Behavior
          description: What did you expect to happen?
          placeholder: Describe what should have happened...
      validations:
          required: true

    - type: textarea
      id: actual
      attributes:
          label: Actual Behavior
          description: What actually happened?
          placeholder: Describe what actually happened...
      validations:
          required: true

    - type: input
      id: version
      attributes:
          label: Kima Version
          description: What version of Kima are you running?
          placeholder: "e.g., v1.0.0, nightly-2024-01-15, or commit hash"
      validations:
          required: true

    - type: dropdown
      id: deployment
      attributes:
          label: Deployment Method
          description: How are you running Kima?
          options:
              - Docker (docker-compose)
              - Docker (standalone)
              - Manual/Source
              - Other
      validations:
          required: true

    - type: textarea
      id: environment
      attributes:
          label: Environment Details
          description: Any relevant environment information (OS, browser, Docker version, etc.)
          placeholder: |
              - OS: Ubuntu 22.04
              - Docker: 24.0.5
              - Browser: Firefox 120
      validations:
          required: false

    - type: textarea
      id: logs
      attributes:
          label: Relevant Logs
          description: Please copy and paste any relevant log output. This will be automatically formatted into code.
          render: shell
      validations:
          required: false

    - type: checkboxes
      id: checklist
      attributes:
          label: Checklist
          options:
              - label: I have searched existing issues to ensure this bug hasn't already been reported
                required: true
              - label: I am using a supported version of Kima
                required: true

```

## /.github/ISSUE_TEMPLATE/config.yml

```yml path="/.github/ISSUE_TEMPLATE/config.yml" 
blank_issues_enabled: false
contact_links:
    - name: Questions & Discussions
      url: https://github.com/Chevron7Locked/kima/discussions
      about: Ask questions and discuss Kima in GitHub Discussions

```

## /.github/ISSUE_TEMPLATE/feature_request.yml

```yml path="/.github/ISSUE_TEMPLATE/feature_request.yml" 
name: Feature Request
description: Suggest a new feature or enhancement
title: "[Feature]: "
labels: ["enhancement", "needs triage"]
body:
    - type: markdown
      attributes:
          value: |
              Thanks for suggesting a feature! Please provide as much detail as possible.

    - type: textarea
      id: problem
      attributes:
          label: Problem or Use Case
          description: What problem does this feature solve? What are you trying to accomplish?
          placeholder: "I'm trying to... but currently..."
      validations:
          required: true

    - type: textarea
      id: solution
      attributes:
          label: Proposed Solution
          description: Describe the feature you'd like to see implemented.
          placeholder: Describe your ideal solution...
      validations:
          required: true

    - type: textarea
      id: alternatives
      attributes:
          label: Alternatives Considered
          description: Have you considered any alternative solutions or workarounds?
          placeholder: Describe alternatives you've considered...
      validations:
          required: false

    - type: dropdown
      id: scope
      attributes:
          label: Feature Scope
          description: How big of a change is this?
          options:
              - Small (UI tweak, minor enhancement)
              - Medium (new component, significant enhancement)
              - Large (new major feature, architectural change)
      validations:
          required: true

    - type: checkboxes
      id: contribution
      attributes:
          label: Contribution
          options:
              - label: I would be willing to help implement this feature
                required: false

    - type: checkboxes
      id: checklist
      attributes:
          label: Checklist
          options:
              - label: I have searched existing issues to ensure this hasn't already been requested
                required: true

```

## /.github/PULL_REQUEST_TEMPLATE.md

## Description

<!-- Briefly describe what this PR does -->

## Type of Change

-   [ ] Bug fix (non-breaking change that fixes an issue)
-   [ ] New feature (non-breaking change that adds functionality)
-   [ ] Enhancement (improvement to existing functionality)
-   [ ] Documentation update
-   [ ] Code cleanup / refactoring
-   [ ] Other (please describe):

## Related Issues

Fixes #

## Changes Made

-
-
-

## Testing Done

-   [ ] Tested locally with Docker
-   [ ] Tested specific functionality:

## Screenshots (if applicable)

## Checklist

-   [ ] My code follows the project's code style
-   [ ] I have tested my changes locally
-   [ ] I have updated documentation if needed
-   [ ] My changes don't introduce new warnings
-   [ ] This PR targets the `main` branch


## /.github/workflows/docker-nightly.yml

```yml path="/.github/workflows/docker-nightly.yml" 
name: Nightly Build

on:
    schedule:
        - cron: "0 4 * * *" # 4am UTC daily
    workflow_dispatch:

concurrency:
    group: docker-nightly
    cancel-in-progress: true

env:
    IMAGE_NAME: ${{ secrets.DOCKERHUB_USERNAME }}/kima

jobs:
    build-nightly:
        name: Build & Push Nightly Image
        runs-on: ubuntu-latest
        steps:
            - name: Checkout code
              uses: actions/checkout@v4

            - name: Free up disk space
              run: |
                  sudo rm -rf /usr/share/dotnet
                  sudo rm -rf /opt/ghc
                  sudo rm -rf /usr/local/share/boost
                  sudo rm -rf "$AGENT_TOOLSDIRECTORY"

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Login to Docker Hub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            - name: Get short SHA
              id: sha
              run: echo "short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT

            - name: Build and push nightly
              uses: docker/build-push-action@v5
              with:
                  context: .
                  file: ./Dockerfile
                  push: true
                  tags: |
                      ${{ env.IMAGE_NAME }}:nightly
                      ${{ env.IMAGE_NAME }}:nightly-${{ steps.sha.outputs.short }}
                  labels: |
                      org.opencontainers.image.revision=${{ github.sha }}
                      org.opencontainers.image.version=nightly-${{ steps.sha.outputs.short }}
                  cache-from: type=gha
                  cache-to: type=gha,mode=max
                  platforms: linux/amd64,linux/arm64

```

## /.github/workflows/docker-publish.yml

```yml path="/.github/workflows/docker-publish.yml" 
name: Build and Publish Docker Image

on:
    push:
        tags:
            - "v*"
    workflow_dispatch:
        inputs:
            version:
                description: "Version tag (e.g., v1.0.0)"
                required: true
                type: string

env:
    DOCKERHUB_IMAGE_NAME: ${{ secrets.DOCKERHUB_USERNAME }}/kima

jobs:
    build:
        runs-on: ubuntu-latest
        permissions:
            contents: read
            packages: write
        steps:
            - name: Checkout
              uses: actions/checkout@v4

            - name: Set GHCR image name
              run: |
                  OWNER="${{ github.repository_owner }}"
                  echo "GHCR_IMAGE_NAME=ghcr.io/${OWNER,,}/kima" >> $GITHUB_ENV

            - name: Free up disk space
              run: |
                  sudo rm -rf /usr/share/dotnet
                  sudo rm -rf /opt/ghc
                  sudo rm -rf /usr/local/share/boost
                  sudo rm -rf "$AGENT_TOOLSDIRECTORY"

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Login to DockerHub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            - name: Login to GitHub Container Registry
              uses: docker/login-action@v3
              with:
                  registry: ghcr.io
                  username: ${{ github.actor }}
                  password: ${{ secrets.GITHUB_TOKEN }}

            - name: Extract version
              id: version
              run: |
                  if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
                    echo "version=${{ inputs.version }}" >> $GITHUB_OUTPUT
                  else
                    echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
                  fi

            - name: Build and push
              uses: docker/build-push-action@v5
              with:
                  context: .
                  file: ./Dockerfile
                  push: true
                  tags: |
                      ${{ env.DOCKERHUB_IMAGE_NAME }}:${{ steps.version.outputs.version }}
                      ${{ env.DOCKERHUB_IMAGE_NAME }}:latest
                      ${{ env.GHCR_IMAGE_NAME }}:${{ steps.version.outputs.version }}
                      ${{ env.GHCR_IMAGE_NAME }}:latest
                  cache-from: type=gha
                  cache-to: type=gha,mode=max
                  platforms: linux/amd64,linux/arm64

    create-release:
        needs: [build]
        runs-on: ubuntu-latest
        permissions:
            contents: write
        steps:
            - name: Checkout
              uses: actions/checkout@v4

            - name: Extract version
              id: version
              run: |
                  if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
                    echo "version=${{ inputs.version }}" >> $GITHUB_OUTPUT
                  else
                    echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
                  fi

            - name: Create Release
              uses: softprops/action-gh-release@v1
              with:
                  tag_name: ${{ steps.version.outputs.version }}
                  name: Kima ${{ steps.version.outputs.version }}
                  body: |
                      ## Quick Start

                      ```bash
                      docker run -d \
                        --name kima \
                        -p 3030:3030 \
                        -v /path/to/your/music:/music \
                        -v kima_data:/data \
                        ${{ secrets.DOCKERHUB_USERNAME }}/kima:${{ steps.version.outputs.version }}
                      ```

                      Then open http://localhost:3030 and create your account!

                      ## Documentation

                      See the [README](https://github.com/${{ github.repository }}#readme) for full documentation.
                  draft: false
                  prerelease: false
                  generate_release_notes: true

```

## /.github/workflows/integration.yml

```yml path="/.github/workflows/integration.yml" 
name: Integration Tests

on:
    pull_request:
        branches: [main]
        types: [labeled]
    workflow_dispatch:

concurrency:
    group: integration-${{ github.ref }}
    cancel-in-progress: true

jobs:
    e2e:
        name: E2E Tests
        if: github.event_name == 'workflow_dispatch' || github.event.label.name == 'run-e2e'
        runs-on: ubuntu-latest
        timeout-minutes: 60

        steps:
            - uses: actions/checkout@v4

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Build Docker image
              uses: docker/build-push-action@v5
              with:
                  context: .
                  push: false
                  load: true
                  tags: kima:e2e
                  cache-from: type=gha
                  cache-to: type=gha,mode=max

            - name: Start Kima stack
              run: |
                  docker run -d \
                    --name kima-e2e \
                    -p 3030:3030 \
                    -p 3006:3006 \
                    -v kima_e2e_data:/data \
                    kima:e2e
                  echo "Waiting for health check..."
                  timeout 90 bash -c 'until curl -sf http://localhost:3030/api/health; do sleep 3; done'

            - name: Create E2E test user
              run: |
                  TEST_USER="kima_e2e"
                  TEST_PASS="$(openssl rand -hex 20)"
                  echo "::add-mask::${TEST_PASS}"
                  echo "KIMA_TEST_USERNAME=${TEST_USER}" >> "$GITHUB_ENV"
                  echo "KIMA_TEST_PASSWORD=${TEST_PASS}" >> "$GITHUB_ENV"
                  KIMA_CONTAINER=kima-e2e \
                  KIMA_TEST_USERNAME="${TEST_USER}" \
                  KIMA_TEST_PASSWORD="${TEST_PASS}" \
                  bash scripts/create-e2e-user.sh

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"
                  cache: "npm"
                  cache-dependency-path: frontend/package-lock.json

            - name: Install Playwright
              working-directory: frontend
              run: |
                  npm ci
                  npx playwright install --with-deps chromium

            - name: Run predeploy tests
              working-directory: frontend
              run: npm run test:predeploy
              env:
                  KIMA_TEST_USERNAME: ${{ env.KIMA_TEST_USERNAME }}
                  KIMA_TEST_PASSWORD: ${{ env.KIMA_TEST_PASSWORD }}
                  KIMA_UI_BASE_URL: http://127.0.0.1:3030

            - name: Run functional tests
              working-directory: frontend
              run: |
                  npx playwright test \
                    tests/e2e/smoke.spec.ts \
                    tests/e2e/queue.spec.ts \
                    tests/e2e/playlists.spec.ts \
                    tests/e2e/security.spec.ts \
                    tests/e2e/vibe.spec.ts \
                    tests/e2e/full-ux-audit.spec.ts \
                    --reporter=list
              env:
                  KIMA_TEST_USERNAME: ${{ env.KIMA_TEST_USERNAME }}
                  KIMA_TEST_PASSWORD: ${{ env.KIMA_TEST_PASSWORD }}
                  KIMA_UI_BASE_URL: http://127.0.0.1:3030

            - name: Upload test results on failure
              if: failure()
              uses: actions/upload-artifact@v4
              with:
                  name: playwright-results-${{ github.run_id }}
                  path: |
                      frontend/test-results/
                      frontend/playwright-report/
                  retention-days: 7

            - name: Stop and remove stack
              if: always()
              run: |
                  docker stop kima-e2e || true
                  docker rm kima-e2e || true
                  docker volume rm kima_e2e_data || true

```

## /.github/workflows/nightly.yml

```yml path="/.github/workflows/nightly.yml" 
name: Nightly Full Suite

on:
    schedule:
        - cron: "0 3 * * *"
    workflow_dispatch:

concurrency:
    group: nightly
    cancel-in-progress: true

jobs:
    full-e2e:
        name: Full E2E Suite
        runs-on: ubuntu-latest
        timeout-minutes: 60
        permissions:
            issues: write

        steps:
            - uses: actions/checkout@v4

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Build Docker image
              uses: docker/build-push-action@v5
              with:
                  context: .
                  push: false
                  load: true
                  tags: kima:nightly
                  cache-from: type=gha
                  cache-to: type=gha,mode=max

            - name: Start Kima stack
              run: |
                  docker run -d \
                    --name kima-nightly \
                    -p 3030:3030 \
                    -p 3006:3006 \
                    -v kima_nightly_data:/data \
                    kima:nightly
                  timeout 90 bash -c 'until curl -sf http://localhost:3030/api/health; do sleep 3; done'

            - name: Create test user
              run: |
                  # Generate random credentials for this run -- no hardcoded passwords in source.
                  TEST_USER="kima_e2e"
                  TEST_PASS="$(openssl rand -hex 20)"
                  echo "::add-mask::${TEST_PASS}"
                  echo "KIMA_TEST_USERNAME=${TEST_USER}" >> "$GITHUB_ENV"
                  echo "KIMA_TEST_PASSWORD=${TEST_PASS}" >> "$GITHUB_ENV"
                  KIMA_CONTAINER=kima-nightly \
                  KIMA_TEST_USERNAME="${TEST_USER}" \
                  KIMA_TEST_PASSWORD="${TEST_PASS}" \
                  bash scripts/create-e2e-user.sh

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"
                  cache: "npm"
                  cache-dependency-path: frontend/package-lock.json

            - name: Install Playwright
              working-directory: frontend
              run: |
                  npm ci
                  npx playwright install --with-deps chromium

            - name: Run full E2E suite
              working-directory: frontend
              run: npx playwright test --reporter=list
              env:
                  KIMA_TEST_USERNAME: ${{ env.KIMA_TEST_USERNAME }}
                  KIMA_TEST_PASSWORD: ${{ env.KIMA_TEST_PASSWORD }}
                  KIMA_UI_BASE_URL: http://127.0.0.1:3030

            - name: Upload results
              if: always()
              uses: actions/upload-artifact@v4
              with:
                  name: nightly-report-${{ github.run_id }}
                  path: |
                      frontend/test-results/
                      frontend/playwright-report/
                  retention-days: 14

            - name: Open issue on failure
              if: failure()
              uses: actions/github-script@v7
              with:
                  script: |
                      await github.rest.issues.create({
                        owner: context.repo.owner,
                        repo: context.repo.repo,
                        title: `Nightly suite failed -- ${new Date().toISOString().split('T')[0]}`,
                        body: `The nightly E2E suite failed on ${context.sha}.\n\nRun: ${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`,
                        labels: ['bug', 'ci']
                      });

            - name: Stop stack
              if: always()
              run: |
                  docker stop kima-nightly || true
                  docker rm kima-nightly || true
                  docker volume rm kima_nightly_data || true

```

## /.github/workflows/pr-checks.yml

```yml path="/.github/workflows/pr-checks.yml" 
name: PR Checks

on:
    pull_request:
        branches: [main]
        types: [opened, synchronize, reopened]

jobs:
    lint-frontend:
        name: Lint Frontend
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v4

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"
                  cache: "npm"
                  cache-dependency-path: frontend/package-lock.json

            - name: Install dependencies
              working-directory: frontend
              run: npm ci

            - name: Run ESLint
              working-directory: frontend
              run: npm run lint

    typecheck:
        name: TypeScript Typecheck
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v4

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"
                  cache: "npm"
                  cache-dependency-path: frontend/package-lock.json

            - name: Typecheck frontend
              working-directory: frontend
              run: |
                  npm ci
                  npm run typecheck

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"
                  cache: "npm"
                  cache-dependency-path: backend/package-lock.json

            - name: Typecheck backend
              working-directory: backend
              run: |
                  npm ci
                  npm run typecheck

    unit-tests:
        name: Backend Unit Tests
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v4

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"
                  cache: "npm"
                  cache-dependency-path: backend/package-lock.json

            - name: Install dependencies
              working-directory: backend
              run: npm ci

            - name: Run Jest
              working-directory: backend
              # Exclude webhookEventStore -- it is an integration test requiring
              # a live PostgreSQL database. It runs in integration.yml instead.
              run: npm test -- --passWithNoTests --testPathIgnorePatterns="webhookEventStore"

    build-docker:
        name: Docker Build Check
        runs-on: ubuntu-latest
        needs: [lint-frontend, typecheck, unit-tests]
        steps:
            - uses: actions/checkout@v4

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Build Docker image (no push)
              uses: docker/build-push-action@v5
              with:
                  context: .
                  push: false
                  tags: kima:pr-check
                  cache-from: type=gha
                  cache-to: type=gha,mode=max

```

## /.github/workflows/security.yml

```yml path="/.github/workflows/security.yml" 
name: Security Checks

on:
    pull_request:
        branches: [main]
    schedule:
        - cron: "0 2 * * 0"
    workflow_dispatch:

jobs:
    dependency-audit:
        name: Dependency Audit
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v4

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"

            - name: Audit frontend (critical only)
              working-directory: frontend
              run: |
                  npm ci --ignore-scripts
                  npm audit --audit-level=critical

            - name: Audit backend (critical only)
              working-directory: backend
              run: |
                  npm ci --ignore-scripts
                  npm audit --audit-level=critical

    security-e2e:
        name: Security E2E Tests
        runs-on: ubuntu-latest
        timeout-minutes: 20

        steps:
            - uses: actions/checkout@v4

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Build Docker image
              uses: docker/build-push-action@v5
              with:
                  context: .
                  push: false
                  load: true
                  tags: kima:security
                  cache-from: type=gha
                  cache-to: type=gha,mode=max

            - name: Start Kima stack
              run: |
                  docker run -d \
                    --name kima-security \
                    -p 3030:3030 \
                    -p 3006:3006 \
                    -v kima_security_data:/data \
                    kima:security
                  timeout 90 bash -c 'until curl -sf http://localhost:3030/api/health; do sleep 3; done'

            - name: Create test user
              run: |
                  TEST_USER="kima_e2e"
                  TEST_PASS="$(openssl rand -hex 20)"
                  echo "::add-mask::${TEST_PASS}"
                  echo "KIMA_TEST_USERNAME=${TEST_USER}" >> "$GITHUB_ENV"
                  echo "KIMA_TEST_PASSWORD=${TEST_PASS}" >> "$GITHUB_ENV"
                  KIMA_CONTAINER=kima-security \
                  KIMA_TEST_USERNAME="${TEST_USER}" \
                  KIMA_TEST_PASSWORD="${TEST_PASS}" \
                  bash scripts/create-e2e-user.sh

            - uses: actions/setup-node@v4
              with:
                  node-version: "22"
                  cache: "npm"
                  cache-dependency-path: frontend/package-lock.json

            - name: Install Playwright
              working-directory: frontend
              run: |
                  npm ci
                  npx playwright install --with-deps chromium

            - name: Run security spec
              working-directory: frontend
              run: npx playwright test tests/e2e/security.spec.ts --reporter=list
              env:
                  KIMA_TEST_USERNAME: ${{ env.KIMA_TEST_USERNAME }}
                  KIMA_TEST_PASSWORD: ${{ env.KIMA_TEST_PASSWORD }}
                  KIMA_UI_BASE_URL: http://127.0.0.1:3030

            - name: Upload results on failure
              if: failure()
              uses: actions/upload-artifact@v4
              with:
                  name: security-test-results-${{ github.run_id }}
                  path: frontend/test-results/
                  retention-days: 7

            - name: Stop stack
              if: always()
              run: |
                  docker stop kima-security || true
                  docker rm kima-security || true
                  docker volume rm kima_security_data || true

```

## /.gitignore

```gitignore path="/.gitignore" 
# Environment & Secrets
.env
.env.*
!.env.example
*.local
.roomodes

# Dependencies
**/node_modules/
**/__pycache__/
*.py[cod]
*$py.class
*.so
.Python
venv/
.venv/
**/.venv/

# Build Outputs
frontend/.next/
frontend/out/
backend/dist/
**/dist/
**/build/
**/out/
.next

# Logs
logs/
*.log
npm-debug.log*
yarn-debug.log*
pnpm-debug.log*
backend/logs/
frontend/logs/

# Testing & Coverage
coverage/
*.lcov
.nyc_output
**/playwright-report/
**/test-results/

# Temporary & Backup Files
*.tmp
*.temp
*.bak
*.old

# Cache
.cache
.eslintcache
.stylelintcache
.rpt2_cache_*/
.ruff_cache/

# Docker
docker-compose.override.yml
docker-compose.local.yml
**/volumes/
**/data/

# IDEs & Editors
.vscode/
.idea/
*.iml
*.sublime-workspace
*.sublime-project
*.swp
*.swo
*~
\#*\#
.\#*
*.code-workspace

# OS Files
.DS_Store
Thumbs.db
Desktop.ini

# Runtime & Process
pids/
*.pid
*.seed
*.pid.lock

# Database
*.sqlite
*.sqlite3
*.db
*.db-shm
*.db-wal
**/prisma/dev.db
**/prisma/dev.db-journal

# TypeScript
*.tsbuildinfo

# Package Manager
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
pnpm-lock.yaml

# Secrets & Keys
*.keystore
*.jks
keystore.b64
**/key.txt
*.conf
lidify.keystore

# Backend
backend/cache/
**/cache/covers/
**/cache/transcodes/
**/mullvad/

# Frontend
frontend/android/
frontend/test-*.tsx

# Research/Scraping Cache
**/.firecrawl/

# AI Tools & Assistants
.claude/
**/.claude/
!.claude/commands/
.cursor/
**/.cursor/
.roo/
**/.roo/
.aider*
.serena/
**/.serena/
.opencode/
**/.opencode/
.vibe/
**/.vibe/
.claudeignore
CLAUDE.md
AGENTS.md

# Internal Documentation (public docs are in project root)
docs/
**/docs/
context_portal/
issues/
plans/
planning/
systems/

# Dev test scripts
/e2e/

# Release/Deployment Artifacts
RELEASE_NOTES_*.md
RELEASE_*_COMMANDS.sh
DEPLOYMENT_COMMANDS_*.txt
*_RELEASE_SUMMARY.md
CODE_REVIEW_*.md

# Local stress tests
tests/stress/

# Development
.worktrees/
reset-and-setup.sh
organize-singles.sh
COMMIT_MESSAGE.txt
ISSUE_REVIEW.md
backend/backend/
postman/
soularr/

# Legacy (removed from repo)
/App.tsx
/app.json
/src/

# Allow README files
!README.md
!readme.md

# Claude Code internal task tracking
.task/

```

## /CHANGELOG.md

# Changelog

All notable changes to Kima will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased] - nightly

## [1.7.11] - 2026-04-08

### Fixed

- **Docker build failure on linux/arm64 (v1.7.10 hotfix)**: The torch/torchaudio/torchvision pins added in v1.7.10 (PR #178) used the `+cpu` local version suffix, which exists on the pytorch CPU index for amd64 but not for arm64 at torch < 2.6.0. The arm64 build step 7/41 failed with `ERROR: Could not find a version that satisfies the requirement torch==2.5.1+cpu`. Dropped the `+cpu` suffix -- PEP 440 matches versions without a local tag against any local variant, so the same pin resolves to `2.5.1+cpu` on amd64 and `2.5.1` on arm64.
- **CLAP model not loading (#165, properly this time)**: The v1.7.6 fix closed this issue but was actually dead code -- it edited `services/audio-analyzer-clap/requirements.txt`, but the main Dockerfile only copies `analyzer.py` from that directory and never installs from requirements.txt. CLAP was broken in every release from when the feature shipped until v1.7.10's pins landed (which would have fixed it if the arm64 build had not failed). Root cause: unpinned `torch torchaudio torchvision` let pip's resolver install mismatched versions (torch 2.5.1+cpu paired with torchaudio 2.11.0+cpu -- ABI-incompatible), and unpinned scipy/pandas were installed at latest versions needing numpy>=1.26, which tensorflow-cpu 2.13 then downgraded to 1.24.3, breaking scipy's `from numpy.exceptions import AxisError`. Fixed by the torch+cpu suffix correction above plus PR #178's (now-effective) numpy/scipy/pandas pins. Verified by reproducing the full CLAP + transformers + laion_clap + tensorflow + essentia import chain in the published v1.7.9 image and confirming the fix resolves all errors.

## [1.7.10] - 2026-04-07

### Added

- **Disc numbers and subtitles for multi-disc albums (#157, #170)**: The scanner now reads `disk.no` and `discsubtitle` tags from file metadata. Album views group tracks by disc with a header when multiple discs are present, and show compact `1-05` / `2-08` track labels for multi-disc releases. Single-disc albums are unchanged. Subsonic clients receive the `discNumber` attribute on songs and tracks are ordered by `[disc, trackNo]` across every code path (library, offline, share, and all Subsonic endpoints). A new additive migration adds two nullable columns (`Track.discNumber`, `Track.discSubtitle`) -- no rescan is required, but rescanning a library picks up the new fields. Thanks @loskutov.

### Fixed

- **Deezer preview CORS/ORB in hardened browsers (#173, #178)**: Previews were failing in LibreWolf, hardened Firefox, and Brave because Deezer's CDN doesn't send cross-origin headers and strict browsers block the direct `<audio>` load via Opaque Response Blocking. Previews now route through new backend proxy endpoints (`/api/artists/preview/.../stream` and `/api/playlists/.../preview/stream`) so audio loads same-origin with proper `Cache-Control: no-store`, 10s upstream timeout, and upstream-stream cleanup on client disconnect. The cached 24h preview URL lookup is bypassed for streaming (using a new `getFreshTrackPreview()` helper) so the Akamai token expiry that caused #172 no longer bites playback. The ownership check on pending-track preview uses a single generic "not found" message for both missing and wrong-playlist cases to avoid leaking existence of other users' track IDs. Thanks @cachamber.
- **Artist sorting ignores "The" and is now case-insensitive (#174, #175)**: Library artist sort used raw SQL ordering with no normalization, so "alt-J" sorted to the end and "The Beatles" sorted under T. Both backend (`backend/src/routes/library/artists.ts` via parameterized `Prisma.sql`) and frontend (`frontend/app/collection/page.tsx`) now strip a leading "The " (case-insensitive) and compare case-insensitively, with a stable ID tiebreaker. Server-side ordering matches the UI and is applied before pagination. Thanks @cachamber.
- **Album track order regression for NULL disc numbers**: The new `orderBy [discNumber asc, trackNo asc]` in #170 relied on Prisma's default sort behavior, which in Postgres puts NULLs LAST. On upgrade-without-rescan, all tracks have `discNumber = NULL`, which stayed correct; but for partially-rescanned libraries where some tracks gained a disc number and others did not, NULL-disc tracks would end up dumped at the end of the album instead of interleaved in track order. All six orderBy call sites (library/albums, offline, share, and three in subsonic/library) now use `{ sort: "asc", nulls: "first" }` to preserve pre-migration order.
- **Finamp removed from README client list (#167)**: Finamp is a Jellyfin-only client and does not support the Subsonic/OpenSubsonic API. The README's two mentions of it were incorrect. Thanks @Chaphasilor.

### Changed

- **Docker build: pin CLAP/PyTorch stack to known-compatible versions (#178)**: Docker image now pins `torch==2.5.1+cpu`, `torchaudio==2.5.1+cpu`, `torchvision==0.20.1+cpu`, `numpy==1.24.4`, `scipy==1.10.1`, `pandas==2.0.3` and adds a build-time sanity check that imports `torch`, `torchaudio`, `numpy`, `scipy`, `pandas`, `laion_clap`, and `transformers.BertModel`. Prevents silent ML dependency resolution drift between builds. Thanks @cachamber.
- **Security audit cleanup**: Removed ~553 lines of stale/vulnerable transitive dependencies from `backend/package-lock.json` and `frontend/package-lock.json` (4 high/1 critical backend, 3 high frontend). Lockfile-only changes, no package.json dependency bumps.
- **Preview stream cleanup**: Both Deezer preview proxy endpoints now destroy the upstream axios stream on client disconnect (`res.on("close", ...)`), matching the existing audiobook stream pattern.

## [1.7.9] - 2026-04-06

### Fixed

- **Multi-track audiobooks auto-complete after first file**: Audiobooks stored as multiple audio files (e.g., one MP3 per CD/chapter) were being marked `isFinished=true` and stopping playback as soon as the first file ended -- typically within a few minutes -- with no way to resume. Root cause: `streamAudiobook()` hardcoded `tracks[0]`, and the frontend's `handleEnded` handler treated any file end as the entire book finishing. Fix: backend now accepts a `trackIndex` query parameter and exposes the full `tracks[]` array (with `startOffset` and `duration`) in the audiobook detail response; frontend player tracks the active file, advances to the next track on `ended`, handles cross-track seeks, and applies a `trackOffset` so all displayed/saved positions reflect total-book time. Single-file audiobooks are unaffected.

## [1.7.8] - 2026-03-31

### Fixed

- **iOS PWA background audio: Control Center pause/resume produces no sound (WebKit #261858)**: Standalone PWAs on iOS use WKWebView, which suspends the audio session when backgrounded and cannot reactivate it from Control Center -- `play()` resolves but produces no sound. Confirmed via on-device debugging with ios-webkit-debug-proxy. Fix: iOS "Add to Home Screen" now creates a Safari bookmark (display: browser) instead of a standalone WKWebView app, giving full audio session support. Desktop and Android retain the standalone PWA experience. Dynamic manifest route serves platform-appropriate display mode based on User-Agent.

## [1.7.7] - 2026-03-31

### Fixed

- **iOS audio not resuming after notification or Control Center pause**: On iPhone, audio would stop when a notification came in or when paused from Control Center, and tapping play showed "playing" but produced no sound. Root cause: the foreground recovery handler checked `isPlaying()` (always false after iOS interrupts audio) instead of tracking whether playback was interrupted by the system. Added three-layer recovery: (1) 1-second auto-resume timer for brief notification interruptions, (2) robust Media Session play handler that reloads the audio source on failure, (3) foreground recovery that detects system-interrupted state and resumes when app returns to focus. Also handles `AbortError` (iOS audio session invalidated) by reloading the source, and `NotAllowedError` by prompting the user to tap play.

## [1.7.6] - 2026-03-30

### Fixed

- SSRF blocking admin-configured integration URLs, CLAP numpy version floor (#165, #166)

## [1.7.5] - 2026-03-29

### Fixed

- Onboarding validation rejects disabled integrations, hide audiobooks when disabled (#162)

## [1.7.4] - 2026-03-21

### Fixed

- **Search returns no results for common words ("the", "a", etc.)**: PostgreSQL FTS drops English stop words, so `to_tsquery('english', 'the:*')` silently returns zero rows without raising an error -- meaning the ILIKE fallback never triggered. All three search paths (artists, albums, tracks) now fall through to ILIKE when FTS returns an empty result set.
- **`/health` endpoint returns no diagnostic detail**: Both `/health` and `/api/health` previously returned only `{"status":"ok"}` regardless of dependency state. They now ping PostgreSQL and Redis, return `uptime`, `version`, `db`, and `redis` fields, and respond with HTTP 503 when either dependency is unavailable.

### Changed

- **TypeScript strict mode enabled (frontend)**: `"strict": true` in `frontend/tsconfig.json`. Fixed 31 implicit-any and strictNullChecks errors across album/artist/podcast pages, MetadataEditor, AlbumHero, ArtistHero, ArtistActionBar, AvailableAlbums, useDiscoverData, LibraryAlbumsGrid, LibraryTracksList, UnifiedSongsList, CacheSection, useQueries, useTVNavigation.
- **Redis client consolidation**: Removed `redis` (node-redis v4) dependency entirely. All ~30 backend files now use `ioredis` exclusively. Eliminated the dual-client footgun. Pub/sub in `textEmbeddingBridge` rewritten to ioredis event-based API.
- **routes/library.ts split**: 4364-line god-file split into `routes/library/` sub-routers: `artists`, `albums`, `tracks`, `streaming`, `coverArt`, `scan`, `backfill`. Each file has a single clear responsibility. `library.ts` is now a 10-line re-export barrel.
- **services/programmaticPlaylists.ts split**: 3842-line god-file split into `services/mixes/` sub-modules: `genreMixes`, `moodMixes`, `timeMixes`, `discoveryMixes`, `helpers`, `index`. `programmaticPlaylists.ts` is now a 7-line re-export barrel.
- **Startup task consolidation**: Three concurrent IIFE startup tasks (audiobook sync, artist counts backfill, image backfill) consolidated into a sequential `runStartupTasks()` function to avoid overwhelming DB connections at startup.
- **Dead code removal**: Removed unused `ffmpeg-static` dependency (not imported anywhere); added `prebuild` script to clean `dist/` before each build.

### Fixed (internal)

- **enrichmentStateMachine tests**: Updated mock for `enrichmentStateService` to include `publishToChannel` method. Tests for Python analyzer C2 bridge now correctly assert against `publishToChannel` instead of raw Redis `publish` (which was never called after the ioredis consolidation refactored the channel through the service layer).

### Tests

- **Tier 1 backend test coverage added**: New test suites for `services/search.ts` (22 tests -- `queryToTsquery` logic, special char handling, search service), `routes/share.ts` (17 tests -- share token lifecycle, play counting, unauthenticated access), `routes/webhooks.ts` (14 tests -- Lidarr webhook events, secret validation, error handling), `services/musicScanner.ts` (49 tests -- pure parsing helpers, path normalization, metadata extraction), `routes/library/albums.ts` (17 tests -- pagination, filtering, delete). Total test count: 307 (up from 173).

## [1.7.3] - 2026-03-20

### Fixed

- **Cover art 404s after volume wipe or permission change (#149)**: Artist enrichment and album cover fetch now check if native cover files actually exist on disk before preserving stale DB paths. Added `nativeFileExists()` utility, `repairBrokenCovers()` service, and `POST /enrichment/repair-covers` endpoint to scan and clear broken cover paths for re-fetch.
- **ARM64 Docker builds missing core Python dependencies (#1)**: `tensorflow-cpu` has no Linux ARM64 wheels, and all pip installs were chained -- so tensorflow failure prevented redis, bullmq, and other core deps from installing. Split into three layers: core deps (always succeed), ML deps (torch/CLAP -- works on ARM64), and tensorflow/essentia (gracefully degrades on ARM64 with log message).
- **Misleading "Remove docker-compose.override.yml" message on non-Docker installs (#147)**: Replaced Docker-specific text with generic "Service not detected" message for Audio Analysis and Vibe Similarity on Proxmox LXC and bare metal installs.
- **Unused `artistHeroUrl` parameter in enrichAlbumCovers**: Cleaned up dead parameter that was passed but never used.

## [1.7.2] - 2026-03-18

### Fixed

- **Multi-container docker-compose: fresh install shows login instead of setup wizard (#158)**: Next.js rewrites were compiled at build time with `127.0.0.1:3006` baked in because `NEXT_PUBLIC_BACKEND_URL` was never passed as a build arg. In multi-container mode the frontend container can't reach `127.0.0.1:3006` (that's the backend container). Fix: added `NEXT_PUBLIC_BACKEND_URL` as a Dockerfile build arg, defaulting to `http://backend:3006` in docker-compose.yml. The reporter's CORS issue was a secondary symptom -- once the proxy works, all requests are same-origin.
- **Audiobookshelf sync silently skips books**: Books with no title returned early without throwing, but `syncAll` counted them as synced. Now `syncAudiobook` returns a boolean; skipped books increment `result.skipped` instead of `result.synced`.
- **Audiobookshelf sync notification hides failures**: The "Synced N audiobooks" notification now includes failed/skipped counts when non-zero.
- **Audiobookshelf cover download can hang indefinitely**: `downloadCover()` had no fetch timeout. Added `AbortSignal.timeout(10_000)` to prevent a single slow cover from stalling the entire sync.
- **Audiobookshelf book size includes non-audio files**: Changed `book.size` to `book.media?.size` for audio-only size.

### Added

- **Sync button on audiobooks page**: Users can now sync audiobooks directly from the audiobooks page without going through settings or triggering a full enrichment cycle. Shows synced/failed/skipped counts in a toast and refreshes the grid automatically.

## [1.7.1] - 2026-03-17

### Fixed

- **Playlist track count not updating live**: Adding a track to a playlist from the album page or library (collection) no longer requires a page refresh. The React Query cache is now invalidated at all three `addTrackToPlaylist` call sites (`album/[id]/page.tsx`, `useLibraryActions`, `useAddToPlaylistMutation`). Closes #157.
- **Settings: stale restart-required modal removed**: The "Restart Required" modal was a remnant of an old architecture -- services reinitialize live when settings are saved. The modal and all supporting state (`changedServices`, `originalSettings`) have been removed. Closes #158.
- **YouTube / YouTube Music playlist import broken**: `/api/browse/playlists/parse` now handles `youtube.com` and `music.youtube.com` URLs in addition to the short `youtu.be` form. Closes #155.
- **Lidarr MBID mismatch (album MBID sent to artist endpoint)**: `verifyArtistName` was incorrectly receiving the album MBID from Lidarr search results and passing it to the MusicBrainz `/artist/{id}` endpoint, causing 404s on every verification. Closes #156.
- **Hardcoded port-3030 in API base URL detection**: `getApiBaseUrl` no longer hard-codes port 3030 as the "frontend" port. It now returns a relative URL by default, making Kima work behind any reverse proxy or non-standard port mapping. Closes #154.

### Changed

- Dead code removed: unused `setIsBulkAdd` / `setIsAddingToPlaylist` state on album page; unused `useSearchParams()` call in settings page.

## [1.7.0] - 2026-03-16

### Added

- **Vibe 3D galaxy view**: New immersive "galaxy" toggle on the Vibe page renders your library as a navigable 3D star field using React Three Fiber + Three.js. Tracks are positioned by their vibe embeddings, lit by a dynamic space grid with depth lines, and colored by mood. Double-click any point to play. Camera uses map controls with smooth zoom and pan. Rendered in a dedicated `GravityGridScene` with cluster label billboards, optimized point instancing, and post-processing disabled for GPU performance.
- **Vibe map pre-compute and KNN acceleration**: Map data is now pre-computed on the backend at enrichment completion rather than on first page load. KNN graph is built incrementally (100-at-a-time) against a pre-sorted index, cutting cold-start time from seconds to milliseconds on large libraries.
- **GitHub Actions CI pipeline**: Full multi-stage CI across all PRs and pushes to main. Stages: lint + typecheck (frontend + backend), unit tests (Jest), security scan (CodeQL + secrets), E2E predeploy tests (Playwright against a real Docker build), nightly build + push to Docker Hub and GHCR.
- **E2E enrichment cycle spec**: Playwright test that wipes all enrichment data, triggers a full re-enrich, polls until completion, then asserts track success rate >= 80%, vibe map populated, vibe search returning results, and failure rate < 20%. Skips gracefully when the library has fewer than 10 tracks (CI containers). Companion shell script (`scripts/run-enrichment-memory-test.sh`) captures host SUnreclaim and container RSS before/after and fails the run if slab growth exceeds 1 GB.
- **E2E vibe spec**: Playwright test covering vibe map load, track selection, song path generation, vibe search, alchemy (track blending), and similar-tracks suggestions.
- **PWA offline fallback**: Service worker now serves a cached offline page when navigation requests fail. Cached on install alongside the app shell.
- **PWA update banner**: App detects when a new service worker is waiting and shows a non-blocking "Update available" banner. Clicking it activates the new worker and reloads.
- **Web app manifest fixes**: `display_override` with `window-controls-overlay` for desktop PWA title bar space. Corrected `scope`, `start_url`, and `id` fields. Added `edge_side_panel` and screenshot metadata.

### Fixed

- **Enrichment: audio counter stale at completion**: `audio.completed` in the Redis state snapshot was frozen at the value from the last `audioQueued > 0` cycle. Once all tracks were queued, the update block was skipped while Essentia continued processing in the background, leaving the counter stuck (e.g. 171/232). The counter is now flushed from the live DB count at the `isFullyComplete` transition before going idle.
- **Security: double authentication in system settings**: `requireAdmin` already enforces authentication, so the redundant `requireAuth` call before it was removed.
- **Security: OOM from Next.js VMA accumulation**: The App Router's map-style routes allocate ~15 TB of virtual memory, causing `anon_vma_chain` slab exhaustion on the host kernel. Mitigations: Docker memory cap (`mem_limit: 6g`, `memswap_limit: 8g`) in both compose files; Node 22 in the Dockerfile (V8 improvements reduce anonymous VMAs); `MALLOC_ARENA_MAX=1` for the frontend process (fewer glibc arenas); `NODE_OPTIONS=--max-old-space-size=512` for the frontend (bounds heap to reduce GC-driven VMAs).
- **Security: DoS vectors patched**: Unbounded enrichment queue, unguarded bulk endpoints, and routes missing rate limits addressed in the production hardening pass.
- **Security: SSRF vector in webhook callback validation**: Callback URLs are now validated against an allowlist of configured hosts before being followed.
- **Vibe map accuracy**: SLERP interpolation corrected, similarity formula made consistent between search and map. Distinct mood colors assigned per cluster. ARIA labels added to interactive elements.
- **Vibe map timer leak**: `setInterval` polling the map data endpoint was not cleared on unmount. Replaced with React Query cache with explicit `staleTime`.
- **Vibe request ref cleanup**: `vibeRequestRef` was not cleaned up on component unmount, causing stale abort signals on re-mount.
- **Mobile: full-height vibe page rendering**: CSS height chain corrected from `html`/`body` through layout to vibe page so the map fills the viewport without overflow or clipping on mobile.
- **Subsonic: `.view` suffix normalization**: Requests from clients that append `.view` to endpoint names (e.g. `getCoverArt.view`) are now normalized before routing. Expanded OpenSubsonic endpoint compatibility.
- **Service worker: stale styles after rebuild**: SW was caching HTML and CSS documents, causing users to see outdated styles until manual cache clear. HTML and CSS are now excluded from the SW cache entirely.
- **dead `sourceTrack` variable**: Removed dead variable from vibe request path.
- **Abort signal not wired to vibe fetch**: Cancelling a vibe search now properly aborts the in-flight fetch.

### Changed

- **vibe-test prototype page removed**: The `/vibe-test` dev sandbox page and its associated R3F prototype components (`VibeUniverse`, `TrackCloud`, `TrackTooltip`, `universeUtils`) have been removed. The production galaxy view lives in `GravityGridScene`.
- **NowPlayingTab simplified**: Removed legacy MusiCNN sections from the vibe sidebar -- radar chart (recharts), mood spectrum bars, audio features grid (BPM/key/energy/danceability/valence/arousal), and match score badge. These all showed "--" because `audioFeatures` is no longer populated since the vibe system moved to CLAP. The tab now shows album art and track info only.
- **README**: Vibe section rewritten with Map, Galaxy, Drift, and Blend subsections and fresh screenshots. All 19 screenshots retaken at 1440x900 desktop / 390x844 mobile with correct logo rendering.

### Removed

- `recharts` dependency (zero usages after NowPlayingTab cleanup).
- Dead dev artifacts: `backend/test_dedup_manual.ts`, `backend/src/scripts/testDataCleanup.ts`.

## [1.6.4] - 2026-03-12

Fixes #152. Addresses #145.

### Fixed

- **Version display (#152)**: Frontend and backend package.json were not bumped for 1.6.3. Both now read from package.json dynamically
- **Long track playback stalling (#145)**: Added playback watchdog that monitors currentTime progress every 1s. If no advancement for 3s while playing, automatically reloads the stream at the saved position and resumes playback. Supplementary stalled event listener with 10s grace timer as fallback recovery path. Recovery limited to 3 attempts before surfacing an error
- **Page refresh kills playback (#145)**: After a page refresh, pressing play now reloads the stream for the restored track/audiobook/podcast instead of doing nothing. Audiobook and podcast progress is seeked to saved position
- **Network retry improvements**: Exponential backoff (1s/2s/4s) replaces linear (2s/4s), max retries bumped from 2 to 3. Network retries and stall recoveries now auto-resume playback instead of requiring manual "Tap play to resume"
- **Stream cache validation**: Added ETag and Last-Modified headers to audio streaming responses, enabling browser If-Range optimization for efficient reconnection after stalls

### Changed

- **Centralized User-Agent version**: All backend HTTP clients now use a shared USER_AGENT constant from config.ts (derived from package.json) instead of hardcoded version strings scattered across 12 files

## [1.6.2] - 2026-03-05

Closes #32. Partially addresses #25, #90, #124, #139.

### Added

- **Skip MusicBrainz when Lidarr disabled (#90, #124)**: Playlist imports no longer call MusicBrainz for MBID resolution when Lidarr is not configured. Soulseek searches by artist+album+track text and never uses MBIDs, so MB API calls were pure waste for Soulseek-only users. A 170-song import that took ~15 minutes now generates its preview in seconds. Albums without MBIDs route directly to Soulseek instead of being blocked or misrouted to track-based acquisition.
- **Import cancellation with AbortSignal**: Cancelling a playlist import now immediately aborts all in-flight and queued Soulseek searches and downloads. Previously, `cancelJob()` only marked DB records as failed while rate-limiter-queued searches continued executing for minutes. AbortSignal threads from `cancelJob()` through the PQueue album pipeline, acquisition service, rate limiter, search strategies, and download retry loop.
- **Background playlist imports**: Importing a playlist URL no longer navigates to a full-page progress screen. Imports fire in the background with a toast notification, and the user stays on their current page. Completion, failure, and cancellation show toast notifications via SSE events.
- **Import URL dedup**: Submitting the same playlist URL while an import is already active returns the existing job instead of creating a duplicate. URLs are normalized (host + pathname, trailing slashes stripped) for reliable matching.
- **Imports management tab**: New "Imports" tab in the Activity Panel shows all active and past imports with real-time progress bars, status badges, cancel buttons, and links to created playlists.
- **Import page reconnect**: Refreshing `/import/playlist` while an import is running reconnects to the active job's progress instead of showing a blank form.
- **Early playlist name resolution**: Quick imports now fetch the real playlist name from Spotify/Deezer before enqueueing, so the Imports tab shows the actual name immediately instead of a generic placeholder.

- **Playlist action hub**: Create, Import URL, Import File (M3U), and Browse buttons directly on the playlists page. No more navigating through Browse to import.
- **Sidebar create playlist**: The "+" button in the sidebar now opens an inline create dialog instead of navigating away.
- **M3U playlist import**: Upload `.m3u` / `.m3u8` playlist files to create playlists by matching entries against your library. 4-tier matching: file path, filename, exact metadata, fuzzy metadata (fuzzball).
- **Multi-playlist add**: The "Add to Playlist" picker in the full player now supports selecting multiple playlists at once with checkboxes and a confirm button. Existing single-select callers are unchanged.
- **Playlist visibility toggle**: Globe/Lock button on the playlist detail page lets owners toggle public/private visibility. Previously required database editing for imported playlists.
- **BullMQ import queue**: Playlist imports (Spotify, Deezer, M3U) now run via a dedicated `playlist-import` BullMQ queue instead of fire-and-forget async. Provides crash recovery, visibility in Bull Board admin panel, and proper queue semantics.
- **Podcast refresh buttons**: RefreshCw button on podcast detail page checks for new episodes. "Refresh All" button on main podcasts page queues refresh for all subscriptions via BullMQ.
- **Custom RSS feed subscription**: "Add RSS Feed" button on the main podcasts page lets users subscribe to any podcast by pasting a direct RSS feed URL, without needing to find it on Apple Podcasts.
- **Conditional GET for feed refresh**: Podcast feed fetches now send `If-Modified-Since` and `ETag` headers, receiving 304 Not Modified when feeds haven't changed. Reduces bandwidth and server load for hourly auto-refresh.

### Changed

- **Route rename**: `/library` is now `/collection` (redirects preserved). `/import/spotify` is now `/import/playlist` (redirects preserved with query params).
- **Onboarding simplified to 2 steps**: Removed the informational step 3 (enrichment/analysis features). Onboarding is now Account + Integrations, with "Complete Setup" finishing directly from step 2.
- **Smoother sync progress bar**: SSE events now emit every 1% instead of 2%, and polling fallback tightened from 2s to 500ms. Progress bar reflects real scan data at higher resolution.
- **Import cancel cleanup**: Cancelling an import now fully removes all DB records, Redis cache entries, and BullMQ jobs. Failed imports with zero matched tracks also clean up automatically. Partial failures preserve matched tracks.

### Fixed

- **Security: hardcoded Last.fm API key removed**: Default fallback API key removed from source code. `LASTFM_API_KEY` environment variable is now required for Last.fm enrichment.
- **Spotify 100-track pagination**: Anonymous Spotify tokens cap `tracks.total` at 100, preventing pagination from triggering. Now speculatively fetches additional pages when a full page of results is received, bypassing the cap for playlists of any size.
- **Playlist partial update schema**: `PUT /playlists/:id` previously required `name` in every request body (using create schema). Now uses a dedicated update schema where both `name` and `isPublic` are optional, supporting partial updates without resetting unrelated fields.
- **Artist MBID race condition**: Concurrent enrichment workers could both check that an MBID was free, then both try to claim it, crashing the second worker with a unique constraint violation. All four MBID write sites now catch Prisma `P2002` errors and gracefully skip the MBID update while preserving other enrichment data.
- **Double import on page refresh**: Refreshing `/import/playlist` while an import was running fired a second import for the same URL. Removed auto-start behavior; the page now checks for active imports and reconnects to them.

### Removed

- Dead code cleanup: removed 3 unused service files (`openai.ts`, `fileValidator.ts`, `Skeleton.tsx`), 16 unused exports across utils/middleware/workers, and debug console.logs from Soulseek search hook.

## [1.6.1] - 2026-03-03

Closes #121, #125, #136, #138. Partially addresses #139, #25, #108, #30.

### Added

- **Share links**: Generate shareable URLs for playlists, tracks, and albums. Public playback page with built-in audio player, no account required. Token-based access with optional expiry and play count limits. Share popover in playlist page with copy-to-clipboard and revoke.
- **Playlist inline rename**: Click playlist title to edit in place. Enter to save, Escape to cancel, click-away to save. Input stays open on save failure for retry.
- **Player queue and add-to-playlist buttons**: Queue navigation button and add-current-track-to-playlist button in the full player bar.
- **Local artist images**: Library scanner discovers `artist.jpg`/`folder.jpg`/`.png`/`.webp` in music directories and copies them to the image cache. Enrichment preserves local images over external URLs.
- **Playback queue expanded to 2000 items**: Queue storage increased from 100 to 2000 tracks (frontend and backend).
- **GHCR publishing**: Docker images now published to GitHub Container Registry alongside Docker Hub on tagged releases. Credit to @SupremeMortal (#48).
- **#134 Lidarr batch album fetching**: Large Lidarr libraries no longer crash with V8 string overflow -- albums are fetched in paginated batches. Credit to @cachamber.
- **#132 Preview volume sync**: Preview audio volume now syncs with the global player volume. Credit to @cachamber.
- **Safari audio session hint**: Explicitly sets `navigator.audioSession.type = "playback"` on Safari 16.4+ to ensure the correct AVAudioSession category before first playback.

### Fixed

- **Security: path traversal in cover art serving**: `getLocalImagePath` and `getResizedImagePath` lacked path containment checks. Added `path.resolve` + `startsWith` guards matching the existing `validateCoverPath` pattern. Removed dead `localImageExists` function.
- **Security: share stream missing error handlers**: Raw `createReadStream.pipe(res)` replaced with `streamFileWithRangeSupport` utility for proper stream error handling and file descriptor cleanup on client disconnect.
- **Security: global JSON body limit too broad**: 5mb limit applied to all routes. Replaced with conditional middleware -- 5mb for playback state only, 1mb for everything else.
- **Enrichment: manual enrich overwrites local artist images**: `applyArtistEnrichment` unconditionally replaced `heroUrl` with external URLs. Added DB re-read + native path guard matching the background worker.
- **Enrichment: stale heroUrl reference in download fallback**: Removed misleading `artist.heroUrl` check on stale function parameter. The downstream DB re-read handles native path preservation.
- **Scanner: wrong artist image in deep directory structures**: Directory iteration went shallow-to-deep, matching genre-level `folder.jpg` before artist-level. Reversed to deep-to-shallow.
- **UI: playlist rename and add-to-playlist fail silently**: Added try/catch with toast errors. Rename input stays open on failure for retry.
- **Mobile: double-tap to play tracks not working**: `onDoubleClick` on track rows does not fire on touch devices. Added `touch-action: manipulation` and custom double-tap detection via `onTouchEnd` with 300ms window across all 7 track list components. Desktop double-click preserved.
- **Mobile lyrics: text clipped by album art container**: Lyrics crawl rendered above album art but was clipped by the parent's `overflow-hidden`. Replaced with a full lyrics view that swaps out the album art when active. Synced lyrics auto-scroll to the active line; plain lyrics are freely scrollable.
- **Enrichment: audio analysis and vibe embeddings running simultaneously**: Both ML models (Essentia + CLAP) competed for CPU/GPU, causing UI flickering. Vibe phase now defers until audio analysis is fully idle. Removed per-track vibe job queuing from the audio completion subscriber -- `executeVibePhase` sweep is now the sole queuing path.
- **UI: activity panel reopens after closing**: `useEffect` dependency on the full `activityPanel` object caused event listener teardown/re-register on every open/close. Destructured to stable `useCallback` refs.
- **UI: silent failures on playlist operations**: `handleRemoveTrack`, `handleToggleHide`, `handleRemovePendingTrack`, and `handleDeletePlaylist` caught errors with only `console.error`. Added `toast.error` to all four.
- **Player: dead `handleSeek` wrappers**: Removed pass-through wrappers in FullPlayer, OverlayPlayer, and MiniPlayer. `seek` passed directly to `SeekSlider`.
- **Artist page popular tracks**: Improved title matching with three-tier fallback (exact, normalized, aggressively stripped) so remaster/deluxe variants match correctly as owned. Unowned tracks now show artist hero image instead of gray placeholder.
- **Card hover overlay regression**: Dark gradient overlays caused blackout effect on album art hover. Made overlay conditional on playable cards, softened opacity on grid cards.
- **Album navigation delay**: First click to album pages felt unresponsive due to `prefetch={false}` on all card Links. Enabled Next.js prefetching for instant navigation.
- **GHCR image name casing**: `github.repository_owner` preserves uppercase but GHCR requires all-lowercase. Compute image name at runtime with bash lowercase conversion.
- **#128 Subsonic rate limit too low for Symfonium sync**: Large libraries (2000+ songs) hit the 300 req/min rate limit during Symfonium sync. Bumped to 1500 req/min -- self-hosted service behind auth, no brute-force risk.
- **Mobile: lock screen always shows "playing" / steals Bluetooth/CarPlay**: Removed the silence keepalive system that looped near-silent audio to maintain the OS audio session while paused.
- **Mobile: resumeWithGesture shows "playing" when blocked by OS**: Now awaits confirmation and reverts on failure.
- **Audiobook progress overwritten on track end**: Completion flag was immediately overwritten by the pause-triggered progress save. Fixed ordering.
- **Duplicate "play" event firing**: Now emits only on `playing` (when audio is actually producing sound).
- **MediaSession metadata unnecessary re-renders**: Removed `isPlaying` from metadata effect deps.
- **Mobile: lock screen stuck on "playing" after errors**: Added `error` event to MediaSession playbackState listeners.
- **Mobile: audio stops silently in background**: Network retry now emits proper error for UI recovery.
- **Mobile: foreground recovery too narrow**: Clears error on foreground return.
- **Podcast progress bar reverts on pause**: Now updates React state after API save.
- **Mobile: permanent pause after phone call/Siri**: Tracks pre-interruption state and attempts auto-resume.
- **Enrichment: `isPaused` permanently stuck after Stop**: Moved `isStopping` handler to top of cycle.
- **Enrichment: vibe re-run doesn't restart cycle**: Now calls `triggerEnrichmentNow()` and cleans completed BullMQ jobs.
- **Enrichment: BullMQ jobId dedup silently drops re-queued vibe tracks**: Added `vibeQueue.clean(0, 0, 'completed')` before `addBulk()`.
- **Enrichment: stale failure records inflate "View Failures" count**: CLAP analyzer resolves failures on success.

### Removed

- **Swagger API documentation**: Removed `swagger-jsdoc`, `swagger-ui-express`, and all 16 `@openapi` annotations. 30 packages eliminated.
- **Debug logging**: Removed 20 debug `console.log`/`console.warn` statements.
- **Unused dependencies**: Removed `react-virtuoso`, `silence-keepalive.ts`, dead `pauseRef`.

### Changed

- **Audio state context cleanup**: Removed unused exports `isRepeat`, `lastServerSync`, `setLastServerSync`, `isHydrated` from context type and provider value.
- **Frontend query keys standardized**: Raw `["playlist", id]` string arrays replaced with centralized `queryKeys` helpers across the playlist page.
- **Share API `entityType` typed**: Parameter typed as `"playlist" | "track" | "album"` union instead of `string`.
- **Playlist mutations use React Query**: Track removal and playlist deletion now use mutation hooks with automatic cache invalidation instead of direct API calls.
- **AuthenticatedLayout**: Public path matching changed from exact match to prefix match for `/share/*` routes.
- **Playlist import performance**: Parallelized MusicBrainz lookups via Promise.all. Batch-loaded all library tracks -- reduced ~3000 per-track DB queries to 2 batch queries.
- **Dependencies**: Updated safe patches -- @bull-board 6.20.3, axios 1.13.6, bullmq 5.70.1, ioredis 5.10.0, fast-xml-parser 5.4.1 (stack overflow CVE fix), tailwindcss 4.2.1, framer-motion 12.34.3, tailwind-merge 3.5.0. Fixed npm audit vulnerabilities.

## [1.6.0] - 2026-03-02

### Fixed

- **Enrichment: failure count inflation**: Python audio analyzer recorded EnrichmentFailure on every attempt, not just after max retries. Removed Python writer; Node.js audioAnalysisCleanup is now the sole writer. Added success resolution in `_save_results()` for immediate cleanup instead of hourly sweep lag.
- **Enrichment: isPaused permanently stuck after Stop**: Stop control message set `isPaused=true` which was never cleared because `shouldHaltCycle()` was unreachable from the early return. Moved `isStopping` handler to top of `runEnrichmentCycle()`. Added `userStopped` flag to prevent auto-restart via timer while allowing explicit re-run/enrich actions.
- **Enrichment: Stop doesn't reach Python analyzer**: `enrichmentState.stop()` only published to `enrichment:control`. Now also publishes `pause` to `audio:analysis:control` (not `stop`, which would exit the process). Resume publishes `resume` to both channels. All re-run functions resume the Python analyzer via `clearPauseState()`.
- **Enrichment: state sync stopping deadlock**: If `enrichment:control` message was lost but state service showed `stopping`, the sync set `isPaused=true` with no `isStopping` to clear it. State sync now handles `stopping` directly by transitioning to idle.
- **Enrichment: reverse sync for missed resume**: If local `isPaused` was stale but state service showed `running`, the cycle stayed paused. Added reverse sync to detect and clear the mismatch.
- **Enrichment: crash recovery gaps**: Startup now resets artists stuck in `enriching` status and tracks with `_queued` sentinel in `lastfmTags`, in addition to existing audio/vibe processing resets.
- **Import: duplicate playlists on large imports**: `checkImportCompletion()`, `buildPlaylistAfterScan()`, and `buildPlaylist()` lacked idempotency guards. Late download callbacks and queueCleaner re-queued scans that each created a new playlist. Added status guards at all three layers.
- **Import: processImport overwrites cancel**: Setting `status="downloading"` without checking if already cancelled. Added cancel guard.
- **Enrichment failures: TOCTOU race in recordFailure**: Find-then-create pattern replaced with atomic `prisma.enrichmentFailure.upsert()`. Also resets `resolved=false` on re-failure (previously hidden from UI).
- **Enrichment failures: Python/Node.js Track status race**: Added `WHERE analysisStatus='processing'` optimistic lock to `_save_results()` and `_save_failed()`. Prevents stale writes when cleanup resets a track near the 15-minute threshold.
- **Discovery: duplicate Discover Weekly jobs**: `discoverQueue.add()` now uses deterministic `jobId` based on userId + week, preventing cron/manual trigger overlap.
- **Discovery: checkBatchCompletion race**: Re-reads batch status after 60s Lidarr wait. Added `expectedStatus` parameter to `updateBatchStatus` optimistic locking for belt-and-suspenders protection.
- **Discovery: album status reset on regeneration**: `discoveryAlbum.upsert()` update branch no longer sets `status: "ACTIVE"`, preserving user's LIKED/DELETED decisions.
- **Scanner: ownedAlbum duplicate constraint violation**: Replaced `create()` with `upsert()` using compound key.
- **Streaming: transcodedFile duplicate constraint violation**: Replaced `create()` with `upsert()` on `cachePath`.
- **Downloads: notification retry creates duplicates**: Added dedup check before `downloadJob.create()` at all 3 retry handlers.
- **Webhook: unnecessary Lidarr API calls**: Skip reconciliation when no processing download jobs exist.
- **Infrastructure: audio-analyzer supervisor autorestart**: Changed from `unexpected` to `true` (matching backend fix).
- **Infrastructure: Redis startup race**: Added Redis readiness loop to `wait-for-db.sh` with separate counter. Backend supervisor changed to `autorestart=true`.
- **Python: deprecated datetime.utcnow()**: Replaced with `datetime.now(timezone.utc)`.

### Added

- 37 new tests across 9 test files covering enrichment state machine, idempotency guards, queue dedup, notification dedup, and Python optimistic locking.

## [1.6.0-pre.2] - 2026-03-01 (nightly)

### Fixed

- **Enrichment: vibe progress jumps 0% to 100%**: CLAP analyzer reported completion via internal HTTP callbacks but never emitted SSE events. Added `enrichment:progress` SSE event type with broadcast support (`userId: "*"`), emitted from vibe success/failure endpoints. Frontend SSE handler invalidates the `enrichment-progress` query on each event for immediate re-fetch.
- **SSRF protection**: Added `validateUrlForFetch()` to podcast stream and download paths to block requests to internal networks.
- **CORS enforcement**: Reject unlisted CORS origins instead of allowing all.
- **Encryption KDF**: Always derive encryption key via SHA-256 with legacy fallback.
- **Query limits**: Clamp `/plays` limit to max 200.
- **Webhook secret comparison**: Use `crypto.timingSafeEqual` for timing-safe webhook secret validation.
- **Webhook log spam**: Rate-limit missing webhook secret warning to once per process.
- **Stream TTL sweep**: Add 1-hour TTL sweep for stale `activeStreams` entries.
- **Transcode race condition**: Deduplicate concurrent transcodes via in-flight map.
- **Streaming singleton**: Make `AudioStreamingService` a singleton to prevent duplicate instances.
- **Enrichment reset**: Exclude processing tracks from full enrichment reset.
- **Image cache eviction**: Add LRU eviction to `useImageColor` localStorage cache (max 500 entries).
- **Preview audio leak**: Remove old preview audio elements from map when switching tracks.
- **Keyboard shortcut re-renders**: Move keyboard shortcut deps to refs for stable effect.
- **Player polling loop**: Move `lastServerSync`/queue/index/shuffle to refs in poll effect.
- **Queue desync on track removal**: Handle removing current track from middle of queue.
- **Overlay re-open on auto-advance**: Don't re-open overlay on auto-advance after first play.
- **Previous track restart**: Restart current track if position > 3s on previous button press.
- **Podcast detection**: Split podcast composite ID before URL comparison.

## [1.6.0-pre.1] - 2026-02-28

### Added

- **Synchronized lyrics**: LRCLIB integration fetches timed `.lrc` lyrics during library scan. Full-player and overlay-player display synced lyrics with a 3-line stacked view (previous/current/next). Lyrics toggle in activity panel with owner-based priority so Discovery settings don't override an active lyrics view.
- **LRCLIB rate limiting**: Lyrics API calls go through the global rate limiter (2 req/s, concurrency 1) to respect upstream limits.

### Fixed

- **Enrichment pipeline**: Fixed 7 issues -- vibe re-run no-ops (dedup cache not cleared), completion notification never firing (dead in-memory counters replaced with DB query), infinite artist retry loop (final attempt reset status to pending), phantom state after shutdown, podcast failures excluded from counts, orphaned frontend type, and removed dead vibe reset endpoint.
- **Feature flags go stale**: Now polls every 60s instead of fetching once on mount.
- **Mood mixer threshold mismatch**: Frontend threshold now matches backend minimum (8 tracks).
- **iOS Safari audio playback**: Reset stale network retry count on preload swap, removed competing silence keepalive from resume gesture, guarded redundant `play()` calls, pre-set track ref for deterministic deduplication, and capped error cascade at 3 consecutive failures.
- **iOS AirPod/lock-screen resume**: Silence keepalive `prime()` in the MediaSession play handler consumed the iOS user gesture budget before the actual audio resume. Moved keepalive priming to the pause handler so the play handler's full gesture is available for `tryResume()`.
- **Audio analyzer retry loop**: Failed tracks had retry count reset to 0 on re-queue, bypassing the max-retries guard. Now preserves count so broken tracks are excluded after 3 attempts.
- **CLAP search timeouts**: Model unloaded after 10s idle when all tracks were embedded, causing ~20s cold-start on every vibe search. Now uses standard 5-minute idle timeout. Backend search timeout increased to 60s.

## [1.5.11] - 2026-02-27

### Added

- **#25** Full playlist pagination for Spotify and Deezer imports -- playlists of any size are now fully imported instead of silently capping at 100 (Spotify) or 25 (Deezer) tracks. Paginated fetch with rate limit handling, partial result recovery, and SSE progress reporting ("Fetching tracks: X of Y...").
- **#8** Configurable Lidarr quality and metadata profiles -- previously hardcoded to profile ID 1. New dropdowns in Settings > Download Services appear after a successful connection test, populated from Lidarr's API. Stored in system settings and used for all artist/album additions.

## [1.5.10] - 2026-02-27

### Added

- **#122** `DISABLE_CLAP=true` environment variable to disable the CLAP audio embedding analyzer on startup in the all-in-one container (useful for low-memory deployments)
- **#123** Foobar2000-style track title formatting in Settings > Playback -- configure a format string with `%field%`, `[conditional blocks]`, `$if2()`, `$filepart()` syntax; applied in playlist view
- **#124** Cancelling a playlist import now creates a partial playlist from all tracks already matched to your library, instead of discarding progress

### Fixed

- **#124** Cancel button previously promised "Playlist will be created with tracks downloaded so far" but discarded all progress -- now delivers on that promise
- **iOS lock screen controls inverted**: MediaSession `playbackState` was driven by React `useEffect` on `isPlaying` state, which fires asynchronously after render -- not synchronously with the actual audio state change. This caused lock screen controls to show the opposite of the actual state (a play button while audio was playing, a pause button while paused). Rewrote MediaSession to drive `playbackState` directly from `audioEngine` events, call the engine directly from action handlers to preserve iOS user-gesture context, and use ref-based one-time handler registration to avoid re-registration churn.
- **Favicon showing old Lidify icon or wrong Kima logo**: Browser tab showed the pre-rebrand Lidify favicon. Replaced with the waveform-only icon generated from `kima-black.webp` as a proper multi-size ICO (16/32/48/64/128/256px) with tight cropping so the waveform fills the tab space.
- **Enrichment pipeline: no periodic vibe sweep**: The enrichment cycle had no phase for queueing vibe/CLAP embedding jobs. The only automatic path was a lossy pub/sub event from Essentia completion -- if missed (crash, restart, migration wipe), tracks were orphaned forever. Added Phase 5 that sweeps for tracks with completed audio but missing embedding rows via LEFT JOIN.
- **Enrichment pipeline: crash recovery dead end**: Crash recovery reset `vibeAnalysisStatus` from `processing` to `null`, which nothing in the regular cycle re-queued. Changed to reset to `pending` so the periodic sweep picks them up.
- **Enrichment pipeline: CLAP analyzer permanent death**: When enrichment was stopped, the backend sent a stop command causing the CLAP analyzer to exit cleanly (code 0). Supervisor's `autorestart=unexpected` treated this as expected and never restarted. Changed to `autorestart=true` and removed the stop signal entirely -- the analyzer has its own idle timeout.
- **Enrichment pipeline: completion never triggers**: `isFullyComplete` required `clapCompleted + clapFailed >= trackTotal`, which was impossible after `track_embeddings` was wiped by migration. Now checks for actual un-embedded tracks via LEFT JOIN.
- **Enrichment pipeline: "Reset Vibe Embeddings" incomplete**: `reRunVibeEmbeddingsOnly()` reset `vibeAnalysisStatus` but did not delete existing `track_embeddings` rows, so the re-queue query (which uses LEFT JOIN) silently skipped tracks that already had embeddings. Now deletes all embeddings first for full regeneration.
- **Feature detection: CLAP reported available when disabled**: When `DISABLE_CLAP=true` was set, `checkCLAP()` skipped the file-existence check but still fell through to heartbeat and data checks. If old embeddings existed in the database, it returned `true`, causing the vibe sweep to queue jobs that no CLAP worker would ever process. Now returns `false` immediately when disabled.
- **docker-compose.server.yml healthcheck using removed tool**: Healthcheck used `wget` which is removed from the production image during security hardening. Changed to `node /app/healthcheck.js` to match docker-compose.prod.yml.
- **#126 Subsonic JSON `getGenres.view` breaking Symfonium**: Genre responses used `#text` for the genre name in JSON output -- correct for XML but violates the Subsonic JSON convention which uses `value`. Symfonium's strict JSON parser rejected the response. Fixed `stripAttrPrefix()` to map `#text` to `value` in all JSON responses.
- **#126 Subsonic `getBookmarks.view` not implemented**: Symfonium calls `getBookmarks.view` during sync and expects a valid response with a `bookmarks` key. The endpoint hit the catch-all "not implemented" handler, returning an error without the required key. Added an empty stub returning `{ bookmarks: {} }`.
- **#91 Artist page only showing 5 popular tracks**: Frontend sliced popular tracks to 5 even though the backend returned 10. Now displays all 10.
- **#63 MusicBrainz base URL hardcoded**: MusicBrainz API URL was hardcoded, preventing use of self-hosted mirrors. Now configurable via `MUSICBRAINZ_BASE_URL` environment variable (defaults to `https://musicbrainz.org/ws/2`).

## [1.5.8] - 2026-02-26

### Fixed

- **Mobile playback: infinite network retry loop**: On mobile networks, transient `MEDIA_ERR_NETWORK` errors triggered a retry cycle that never terminated -- `canplay` and `playing` events reset the retry counter to 0 on every cycle, and `audio.load()` reset `currentTime` to 0, causing the "2-3 seconds then starts over" symptom. Fixed by removing the premature counter resets (counter now only resets on new track load) and saving/restoring playback position across retries.
- **Mobile playback: silence keepalive running during active playback**: The silence keepalive element (used to hold the iOS/Android audio session while paused in background) was started via `prime()` from a non-gesture context, then `stop()` failed to pause it because the `play()` promise hadn't resolved yet, making `el.paused` still true. Fixed by adding proper async play-promise tracking with a `pendingStop` flag, and removing the non-gesture `prime()`/`stop()` calls from the audio engine's `playing` event handler.
- **Mobile playback: play button tap fails to resume on iOS**: All in-app play buttons called `resume()` which only set React state; the actual `audio.play()` ran in a `useEffect` after re-render, outside the iOS user-gesture activation window. Fixed by adding a `resumeWithGesture()` helper that calls `audioEngine.tryResume()` and `silenceKeepalive.prime()` synchronously within the gesture context -- the same pattern already used by MediaSession lock-screen handlers. Applied across all 13 play/resume call sites.
- **Mobile playback: lock screen / notification controls unresponsive after app restore**: MediaSession action handlers were never registered when the app loaded with a server-restored track because the `hasPlayedLocallyRef` guard blocked registration, and the handler registration effect's dependency array was missing `isPlaying`, so it never re-ran when the flag was set. Fixed by adding `isPlaying` to the dependency array.
- **Cover art proxy transient fetch errors**: External cover art fetches that hit transient TCP errors (`ECONNRESET`, `ETIMEDOUT`, `UND_ERR_SOCKET`) now retry once with a 500ms delay before failing.

### Security

- **Error message leakage**: All ~82 backend route catch blocks replaced with a `safeError()` helper that logs the full error server-side but returns only `"Internal server error"` to the client. Prevents stack traces, file paths, and internal details from leaking to users.
- **SSRF protection on cover art proxy**: The cover-art proxy endpoint now validates URLs before fetching -- blocks private/loopback IPs, non-HTTP schemes, and resolves DNS to check for rebinding attacks. Audiobook cover paths also block directory traversal.
- **Login timing side-channel**: Login endpoint previously returned early on user-not-found, allowing username enumeration via response timing. Now runs a dummy bcrypt compare against an invalid hash to normalize response times regardless of whether the user exists.
- **Device link code generation**: Replaced `Math.random()` with `crypto.randomInt()` for cryptographically secure device link codes.
- **Unscoped user queries**: Added `select` clauses to all Prisma user queries that previously loaded full rows (including `passwordHash`) when only the ID or specific fields were needed.
- **Metrics endpoint authentication**: `/api/metrics` now requires authentication.
- **Registration gate**: Added `registrationOpen` system setting (default: closed) and rate limiter on the registration endpoint. After the first user is created, new registrations require an admin to explicitly open registration.
- **Admin password reset role check**: Fixed case mismatch (`"ADMIN"` vs `"admin"`) that could allow non-admin users to trigger password resets.

### Housekeeping

- Removed unused `sectionIndex` variables in audiobooks, home, and podcasts pages.
- Removed dead commented-out album cover grid code and unused imports in DiscoverHero.
- Fixed missing `useCallback` wrapper for `loadPresets` in MoodMixer.
- Added missing `previewLoadState` to effect dependency array in usePodcastData.

## [1.5.7] - 2026-02-23

### Added

- **BullMQ enrichment infrastructure**: Rewrote the entire enrichment pipeline on top of BullMQ v5, replacing the custom BLPOP/Redis queue loops. Artist, track, and podcast enrichment all run as proper BullMQ Worker instances with job-level pause, resume, and stop support. All queues are visible in the Bull Board admin dashboard. The orchestrator pushes jobs into BullMQ and uses a sentinel pattern to track when all jobs in a phase have completed before advancing.
- **Reactive vibe queuing**: The Essentia audio analyzer now publishes an `audio:analysis:complete` event to Redis when each track finishes. The CLAP service subscribes and immediately queues a vibe embedding job for that track — eliminating the previous polling-based approach where CLAP scanned the database on a fixed interval looking for newly-completed Essentia tracks.

### Fixed

- **PWA background audio session lost on iOS and Android**: Pausing from lock-screen / notification controls while the app was backgrounded caused iOS to reclaim the audio session, blocking any subsequent `audio.play()` call until the app was foregrounded. Fixes two related symptoms: (1) resuming from lock-screen controls appeared to do nothing until the app was opened, (2) music stopped after extended background playback during track transitions. Fixed by: calling `audioEngine.tryResume()` synchronously inside the MediaSession `play` handler (within the user-activation window iOS grants to MediaSession callbacks); adding a silent looping audio keepalive (`silence-keepalive.ts`) that holds the OS audio session while user audio is paused and the app is backgrounded; loading the next track directly from the `ended` event handler to eliminate the inter-track silence gap that triggered session reclaim; and adding `visibilitychange` / `pageshow` foreground recovery to retry playback if the engine is paused when the app returns to the foreground.
- **Discovery "Retry All" importing entire albums already in library**: The `POST /discover/retry-unavailable` endpoint fetched all raw `UnavailableAlbum` records for the week without applying the same three-level filter the `GET /current` endpoint uses before displaying them. As a result, clicking "Retry All" triggered full re-downloads of albums that were already present in the library (matched by discovery MBID, library MBID, or fuzzy title+artist). The retry handler now applies all three filters before creating download jobs, and deletes stale `UnavailableAlbum` records for albums already in the library so they do not reappear. Closes #34.
- **Mood-tags phase silently skipping all tracks**: `lastfmTags` was `NULL` for tracks that had been enriched before the column was added. The mood-tags enrichment phase queries `WHERE lastfmTags != '{}'`, which never matches `NULL` — so every track was silently skipped every cycle. Migration backfills all `NULL` values to `'{}'` and sets the column default, so newly enriched tracks are never NULL.
- **Docker image size (28.4 GB → 12.2 GB)**: Removed all CUDA and NVIDIA dependencies from the Docker image. The `audio-analyzer` and `audio-analyzer-clap` services now run on CPU-only PyTorch and TensorFlow. Changed pip installs to use the CPU-only PyTorch wheel index (`--index-url https://download.pytorch.org/whl/cpu`), replaced `tensorflow` with `tensorflow-cpu`, and installed `essentia-tensorflow --no-deps` to prevent pip from pulling the GPU TensorFlow variant as a transitive dependency. Removed `nvidia-cudnn-cu12`, `torchvision` (not imported), the `/opt/cudnn8` CUDA layer, and all NVIDIA library paths from the supervisor `LD_LIBRARY_PATH`. No regressions: TensorFlow confirmed running on CPU, all 9 MusiCNN classification heads load normally.
- **Docker build context bloat**: `frontend/node_modules/` (598 MB) and `frontend/.next/` (313 MB) were not excluded from the Docker build context. The `.dockerignore` `node_modules` pattern only matched root-level; changed to `**/node_modules`. Added `**/.next`. Combined these reduced the `COPY frontend/ ./` layer from 946 MB to ~50 MB.
- **Cover art fetch errors for temp-MBID albums**: Albums with temporary MBIDs (temp-*) were being passed to the Cover Art Archive API, causing 400 errors. Added validation to skip temp-MBIDs in artist enrichment and data cache.
- **VIBE-VOCAB vocabulary file missing**: The vocabulary JSON file wasn't being copied to the Docker image because TypeScript doesn't copy .json files automatically. Added explicit import to force tsc to copy it.
- **Redis memory overcommit warning**: Added `vm.overcommit_memory=1` sysctl to docker-compose.prod.yml and docker-compose.server.yml.
- **Z-index stacking order**: MiniPlayer was z-50 (same tier as modals), causing it to appear above open dialogs due to DOM ordering. Established a consistent stacking hierarchy: MiniPlayer z-[45] → TopBar z-50 → VibeOverlay/toasts z-[55] → MobileSidebar backdrop z-[60] / drawer z-[70] → all modals z-[80] → nested confirm z-[85] → toast z-[100] → OverlayPlayer z-[9999]. MobileSidebar was also using non-standard `z-100` which is not a valid Tailwind class.
- **API token display overflowing viewport on iPhone**: The newly-generated token `<code>` block extended beyond the screen on narrow viewports due to missing `min-w-0` / `overflow-hidden` on its flex container; added both.
- **CLAP BullMQ worker crash on startup**: `import psycopg2` does not implicitly import `psycopg2.pool`; the BullMQ vibe worker was crashing immediately because `psycopg2.pool.ThreadedConnectionPool` was referenced without the submodule being imported. Added explicit `import psycopg2.pool`.
- **EnrichmentStateService Redis disconnect error**: Calling `disconnect()` on an already-closed Redis connection raised an unhandled error. The disconnect is now silenced when the connection is already in a closed state.
- **CLAP worker thread-safety**: All PostgreSQL calls in the CLAP BullMQ worker are now wrapped in `run_in_executor` so they execute on a thread-pool thread rather than blocking the asyncio event loop. Connection pool is initialized once per process and shared safely across concurrent jobs.

## [1.5.5] - 2026-02-21

### Added

- **OpenSubsonic / Subsonic API**: Native client support for Amperfy, Symfonium, DSub, Ultrasonic, Finamp, and any other Subsonic-compatible app
  - Full Subsonic REST API v1.16.1 compatibility, with OpenSubsonic extensions declared
  - **MD5 token auth** — standard Subsonic auth now supported; enter your Kima API token as the password in your client app; the server verifies `md5(token + salt)` against stored API keys, avoiding any need to store plaintext login passwords
  - **OpenSubsonic `apiKey` auth** — generate per-client tokens in Settings > Native Apps; tokens can be named and revoked individually
  - **Endpoints implemented**: `ping`, `getArtists`, `getIndexes`, `getArtist`, `getAlbum`, `getSong`, `getAlbumList2`, `getAlbumList`, `getGenres`, `search3`, `search2`, `getRandomSongs`, `stream`, `download`, `getCoverArt`, `scrobble`, `getPlaylists`, `getPlaylist`, `createPlaylist`, `updatePlaylist`, `deletePlaylist`, `getUser`, `getStarred`, `getStarred2`, `star`, `unstar`, `getArtistInfo2`
  - **Enrichment-aware genres** — genre fields on albums, songs, and search results are sourced from Last.fm-enriched artist tags rather than static file tags; `getGenres` aggregates across the enriched artist catalogue
  - **Enrichment-aware biographies** — `getArtistInfo2` returns the user-edited summary when present, otherwise the Last.fm biography
  - **HTTP 206 range support** on `stream.view` for seek-capable clients and Firefox/Safari
  - Scrobbles recorded as `SUBSONIC` listen source
  - DISCOVER-location albums are excluded from all library views
- **Named API tokens** — Settings > Native Apps token generator now accepts a client name (e.g., "Amperfy", "Symfonium"); previously all tokens were named "Subsonic"
- **Public server URL setting** — admins can pin a persistent server URL in Settings > Storage; the Native Apps panel reads this URL and falls back to the browser origin when unset

### Fixed

- **Subsonic `contentType` and `suffix` wrong for FLAC/MP3**: The library scanner stores codec names (`FLAC`, `MPEG 1 Layer 3`) rather than MIME types. Added `normalizeMime()` to translate codec names to proper MIME types before surfacing them to clients — fixes clients that refused to play tracks due to unrecognised content types
- **`createPlaylist` returned empty response**: Per OpenSubsonic spec (since 1.14.0), `createPlaylist` must return the full playlist object. Now returns the same shape as `getPlaylist`
- **DISCOVER albums leaking into search and random**: `getRandomSongs` raw SQL and the `search3`/`search2` shared service had no location filter, allowing DISCOVER-only albums to appear in results. Both are now filtered to `LIBRARY` location only
- **PWA icons**: Replaced placeholder icons with the Kima brand — amber diagonal gradient with radial bloom; solid black background for maskable variants; `apple-touch-icon` added; MediaSession fallback artwork wired up
- **Frontend lint errors** (pre-existing): `let sectionIndex` changed to `const` in three pages; `setPreviewLoadState` moved inside the async function to avoid calling setState synchronously in a `useEffect`
- **Vibe orphaned-completed tracks**: Tracks where `vibeAnalysisStatus = 'completed'` but no embedding row exists (left over from the `reduce_embedding_dimension` migration) are now detected and reset each enrichment cycle so they re-enter the CLAP queue

## [1.5.4] - 2026-02-21

### Fixed

- **Vibe embeddings never starting**: `queueVibeEmbeddings` only checked for `NULL` or `'failed'` status, but the `add_vibe_analysis_fields` migration set the column default to `'pending'` — every track was silently skipped forever. Added `'pending'` to the WHERE clause.
- **CLAP infinite retry**: Added `VIBE_MAX_RETRIES` SQL guard to `queueVibeEmbeddings` so permanently-failed tracks (retry count ≥ 3) are never re-queued. Fixed off-by-one: cleanup used `>=` (giving 2 resets) instead of `>` (giving the correct 3).
- **Null byte crash in music scanner**: ASCII control characters in ID3 tags (e.g. embedded null bytes) caused PostgreSQL query failures. `sanitizeTagString()` now strips control chars from title, artist, and album tags before any DB write.
- **Soulseek stuck downloads cycling**: Downloads removed from the active list on timeout or stream error were not removed from `SlskClient.downloads`, causing the slot to be permanently occupied. Added `removeDownload()` and called it in all three error paths (timeout, download stream error, write stream error).
- **Artist enrichment duplicate MBID race condition**: Two artists resolving to the same real MBID simultaneously caused a Prisma `P2002` unique constraint violation, leaving one artist stuck in `processing`. The error is now caught specifically — the duplicate is immediately marked `unresolvable` with a warning log.
- **Admin vibe retry silently skipping tracks**: `POST /vibe/retry` reset `EnrichmentFailure.retryCount` but left `Track.vibeAnalysisRetryCount` at its max value, causing the SQL guard in `queueVibeEmbeddings` to silently skip the track forever. Both counts are now reset together.
- **Preview job missing ownership check**: Spotify preview jobs stored in Redis had no `userId` — any authenticated user could read or consume another user's preview result. `userId` is now stored in the Redis payload and validated on both `GET /preview/:jobId` and `POST /import`.
- **Playlist import DB pool exhaustion**: `matchTrack` inside `startImport` used an unbounded `Promise.all`, saturating the connection pool on large playlists. Wrapped with `pLimit(8)`.
- **PWA safe area double-inset on iOS**: `body` padding and `AuthenticatedLayout` margin both applied `env(safe-area-inset-*)`, doubling the inset gap. Replaced with `--standalone-safe-area-top/bottom` CSS custom properties that default to `0px` in browser mode and are set to the real env values only inside `@media (display-mode: standalone)`. Fixes both the double-inset on iOS PWA and the Vivaldi browser over-inset.
- **Mobile bottom content gap**: Removed the 96px bottom padding (`pb-24`) reserved for the mini player. The player is swipeable so the padding is no longer needed.

## [1.5.3] - 2026-02-18

### Fixed

- **Circuit breaker `circuitOpenedAt` drift**: `failureCount >= CIRCUIT_BREAKER_THRESHOLD` stayed true after threshold failures, resetting `circuitOpenedAt` on every subsequent `onFailure()` call — the same rolling-timestamp problem as `lastFailureTime`. Added `&& this.circuitOpenedAt === null` to enforce the single-write invariant.
- **Circuit breaker deadlock**: `shouldAttemptReset()` measured time since last failure, which resets every cleanup cycle, so the 5-minute recovery window never expired. Fixed by recording `circuitOpenedAt` at the moment the breaker first opens and measuring from that fixed point.
- **`recordSuccess()` race condition**: Success detection bracketed only `cleanupStaleProcessing()` — a millisecond window that never captured Python completions (~14s batch cadence). Replaced with `audioLastCycleCompletedCount` tracked across cycles; `recordSuccess()` fires whenever the completed count grows since the previous cycle.
- **CLAP vibe queue self-heal**: `queueVibeEmbeddings` filtered `vibeAnalysisStatus = 'pending'`, skipping thousands of tracks left as `'completed'` after the `reduce_embedding_dimension` migration dropped their embeddings. Changed filter to `<> 'processing'` so `te.track_id IS NULL` (actual embedding existence) is the source of truth.

## [1.5.2] - 2026-02-18

### Fixed

- **Audio analysis enrichment deadlock**: Three compounding bugs caused enrichment to deadlock after 12+ hours of operation.
  - `runFullEnrichment` reset `analysisStatus` to `pending` without clearing `analysisRetryCount`, silently orphaning tracks the Python analyzer would never pick up (it ignores tracks with `retryCount >= MAX_RETRIES`).
  - `queueAudioAnalysis` had no `retryCount` filter, queuing tracks Python ignores — these timed out and fed false positives to the circuit breaker.
  - The circuit breaker fired on `permanentlyFailedCount > 0`, which is expected cleanup behavior, making it permanently unrecoverable — it reopened immediately on every `HALF_OPEN` attempt.

## [1.5.1] - 2026-02-18

### Fixed

- **SSE streaming through Next.js proxy**: SSE events were buffered by Next.js rewrites, breaking real-time Soulseek search results and download progress in production. Added a dedicated Next.js API route (`app/api/events/route.ts`) that streams SSE responses directly, bypassing the buffering rewrite proxy.
- **CLAP analyzer startup contention**: CLAP model loaded eagerly on container boot (~20s of CPU/memory), competing with the Essentia audio analyzer during startup. Model now loads lazily on first job, which only arrives after audio analysis completes.

## [1.5.0] - 2026-02-17

### Changed

- **REBRAND**: Project renamed from Lidify to Kima
- Repository moved to `kima-hub` on GitHub
- Docker images now published as `chevron7locked/kima`
- All user-facing references updated across codebase
- First official release under Kima branding
- **Soulseek credential changes**: Settings and onboarding now reset and reconnect Soulseek immediately instead of just disconnecting
- **Soulseek search timeout**: Reduced from 45s to 10s for faster UI response (200+ results stream well within that window)
- **Search result streaming**: Low-quality results (< 128kbps MP3) filtered before streaming to UI, capped at 200 streamed results per search

### Added

- **Album-level Soulseek search**: Discovery downloads use a single album-wide search query with directory grouping and fuzzy title matching, reducing download time from ~15 minutes to ~15-30 seconds
- **SSE-based Soulseek search**: Search results stream to the browser in real-time via Server-Sent Events instead of waiting for the full search to complete
- **Multi-tab audio sync**: BroadcastChannel API prevents multiple browser tabs from playing audio simultaneously -- new tab claims playback, other tabs pause
- **Network error retry**: Audio engine retries on network errors with exponential backoff (2s, 4s) before surfacing the failure
- **Stream eviction notification**: Users see "Playback interrupted -- stream may have been taken by another session" instead of a generic error
- **Stuck discovery batch recovery**: Batches stuck in scanning state are automatically recovered after 10 minutes and force-failed after 30 minutes
- **Stuck Spotify import recovery**: Spotify imports stuck in scanning or downloading states are automatically detected and recovered by the queue cleaner
- **Manual download activity feed**: Soulseek manual downloads now emit `download:complete` events and appear in the activity feed
- **Critical Reliability Fixes**: Eliminated Soulseek connection race conditions with distributed locks
- **100% Webhook Reliability**: Event sourcing with PostgreSQL persistence
- **Download Deduplication**: Database unique constraint prevents duplicate jobs
- **Discovery Batch Locking**: Optimistic locking with version field
- **Redis State Persistence**: Search sessions, blocklists, and cache layer
- **Prometheus Metrics**: Full instrumentation at `/metrics` endpoint
- **Automatic Data Cleanup**: 30-60 day retention policies
- **Database-First Configuration**: Encrypted sensitive credentials with runtime updates
- **Automatic Database Baselining**: Seamless migration for existing databases
- **Complete Type Safety**: Eliminated all `as any` assertions
- **Typed Error Handling**: User-friendly error messages with proper HTTP codes

### Fixed

- **Discovery download timeout**: Album-level search eliminates the per-track search overhead (13 tracks x 5 strategies x 15s) that caused 300s acquisition timeouts
- **Worker scheduling starvation**: `setTimeout` rescheduling moved into `finally` blocks so worker cycles always reschedule, even when pile-up guards cause early return
- **Concurrent discovery generation**: Distributed lock (`discover:generate:{userId}`, 30s TTL) prevents duplicate batches when the generate button is clicked rapidly
- **Recovery scan routing**: Fixed source strings (`"discover-weekly-completion"`, `"spotify-import"`) so recovered stuck scans trigger the correct post-scan handlers instead of silently completing
- **Unbounded scan re-queuing**: Added deduplication flags so stuck batches aren't re-queued by the queue cleaner every 30 seconds
- **buildFinalPlaylist idempotency**: Early return guard prevents duplicate playlist generation if the method is called multiple times for the same batch
- **MediaError SSR safety**: Replaced browser-only `MediaError.MEDIA_ERR_NETWORK` with literal value `2` for Next.js server-side rendering compatibility
- **Soulseek search session leak**: Sessions capped at 50 with oldest-eviction to prevent unbounded Map growth
- **Soulseek cooldown Map leak**: Added 5-minute periodic cleanup of expired entries from connection cooldown Maps, cleared on both `disconnect()` and `forceDisconnect()`
- **Unhandled promise rejection**: Wrapped fire-and-forget search `.then()`/`.catch()` handler bodies in try/catch
- **Batch download fault tolerance**: Replaced `Promise.all` with `Promise.allSettled` in album search download phase and per-track batch search/download phases so one failure doesn't abort the entire batch
- **SSE connection establishment**: Added `res.flushHeaders()` and per-message `flush()` calls to ensure SSE data reaches the client immediately through reverse proxies

### Removed

- Debug `console.log` statements from SSE event route and Soulseek search route
- Dead `playback-released` BroadcastChannel broadcast code from audio player
- Animated search background gradient (replaced with cleaner static layout)

### Infrastructure

- Redis-based distributed locking for race condition prevention
- Webhook event store with automatic retry and reconciliation
- Comprehensive type definitions for Lidarr and Soulseek APIs
- Architecture Decision Records (ADRs) documenting key technical choices

## [1.4.3] - 2026-02-08

### Fixed

- **Backend unresponsiveness after hours of uptime:** Replaced `setInterval` with self-rescheduling `setTimeout` for the 2-minute reconciliation cycle and 5-minute Lidarr cleanup cycle in `workers/index.ts`. Previously, `setInterval` fired unconditionally every 2/5 minutes regardless of whether the previous cycle had completed. Since `withTimeout()` resolves via `Promise.race` but never cancels the underlying operation, timed-out operations continued running as zombies. Over hours, hundreds of concurrent zombie operations accumulated, starving the event loop and exhausting database connections and network sockets. Each cycle now waits for the previous one to fully complete before scheduling the next, making pile-up impossible.

## [1.4.2] - 2026-02-07

### Added

- **GPU acceleration:** CLAP vibe embeddings use GPU when available (NVIDIA Container Toolkit required); MusicCNN stays on CPU where it performs better due to small model size
- **GPU documentation:** README section with install commands for NVIDIA Container Toolkit (Fedora/Nobara/RHEL and Ubuntu/Debian), docker-compose GPU config, and verification steps
- **Model idle unloading:** Both MusicCNN and CLAP analyzers unload ML models after idle timeout, freeing 2-4 GB of RAM when not processing
- **Immediate model unload:** Analyzers detect when all work is complete and unload models immediately instead of waiting for the idle timeout
- **CLAP progress reporting:** Enrichment progress endpoint now includes CLAP processing count and queue length for accurate UI status
- **Discovery similar artists:** Search discover endpoint returns musically similar artists (via Last.fm `getSimilar`) separately from text-match results
- **Alias resolution banner:** UI banner shown when Last.fm resolves an artist name alias (e.g., "of mice" -> "Of Mice & Men")

### Fixed

- **Case-sensitive artist search ([#64](https://github.com/Chevron7Locked/kima-hub/issues/64)):** Added PostgreSQL tsvector search with ILIKE fallback; all artist/album/track searches are now case-insensitive
- **Circuit breaker false trips:** Audio analysis cleanup circuit breaker now counts cleanup runs instead of individual tracks, preventing premature breaker trips on large batches of stale tracks
- **DB reconciliation race condition:** Analyzer marks tracks as `processing` in the database before pushing to Redis queue, preventing the backend from double-queuing the same tracks
- **Enrichment completion detection:** `isFullyComplete` now checks CLAP processing count and queue length, not just completed vs total
- **Search special characters:** `queryToTsquery` strips non-word characters and filters empty terms, preventing PostgreSQL syntax errors on queries like `"&"` or `"..."`
- **NaN pagination limit:** Search endpoints guard against `NaN` limit values from malformed query params
- **Discovery cache key collisions:** Normalized cache keys (lowercase, trimmed, collapsed whitespace) prevent duplicate cache entries for equivalent queries
- **Worker resize pool churn:** Added 5-second debounce to worker count changes from the UI slider, preventing rapid pool destroy/recreate cycles

### Performance

- **malloc_trim memory recovery:** Both analyzers call `malloc_trim(0)` after unloading models, forcing glibc to return freed pages to the OS (6.5 GB active -> 2.0 GB idle)
- **MusicCNN worker pool auto-shutdown:** Worker pool shuts down when no pending work remains, freeing process pool memory without waiting for idle timeout
- **Enrichment queue batch size:** Reduced from 50 to 10 to match analyzer batch size, preventing buildup of stale `processing` tracks
- **Search with tsvector indexes:** Artist, album, and track tables now have generated tsvector columns with GIN indexes for fast full-text search
- **Discovery endpoint parallelized:** Artist search, similar artists, and Deezer image lookups run concurrently instead of sequentially

### Changed

- **Audio streaming range parser:** Replaced Express `res.sendFile()` with custom range parser supporting suffix ranges (`bytes=-N`) and proper 416 responses -- fixes Firefox/Safari streaming issues on large FLAC files
- **Similar artists separation:** Discovery results now split into `results` (text matches) and `similarArtists` (musically similar via Last.fm), replacing the mixed array
- **Last.fm search tightened:** Removed `getSimilarArtists` padding from `searchArtists()` and raised fuzzy match threshold from 50 to 75 to reduce false positives (e.g., "Gothica" matching "Mothica")

### Removed

- Dead enrichment worker (`backend/src/workers/enrichment.ts`) and mood bucket worker (`backend/src/workers/moodBucketWorker.ts`) -- functionality consolidated into unified enrichment worker
- Unused `useDebouncedValue` hook (replaced by `useDebounce` from search hooks)

### Contributors

- @Allram - Soulseek import fix ([#85](https://github.com/Chevron7Locked/kima-hub/pull/85))

## [1.4.1] - 2026-02-06

### Fixed

- **Doubled audio stream on next-track:** Fixed race condition where clicking next/previous played two streams simultaneously by making track-change cleanup synchronous and guarding the play/pause effect during loading
- **Soulseek download returns 400 (#101):** Frontend now sends parsed title to the download endpoint; backend derives artist/title from filename when not provided instead of rejecting the request
- **Admin password reset (#97):** Added `ADMIN_RESET_PASSWORD` environment variable support -- set it and restart to reset the admin password, then remove the variable
- **Retry failed audio analysis UI (#79):** Added "Retry Failed Analysis" button in Settings that resets permanently failed tracks back to pending for re-processing
- **Podcast auto-refresh (#81):** Podcasts now automatically refresh during the enrichment cycle (hourly), checking RSS feeds for new episodes without manual intervention
- **Compilation track matching (#70):** Added title-only fallback matching strategy for playlist reconciliation -- when album artist doesn't match (e.g. "Various Artists" compilations), tracks are matched by title with artist similarity scoring
- **Soulseek documentation (#27):** Expanded README with detailed Soulseek integration documentation covering setup, search, download workflow, and limitations
- **Admin route hardening:** Added `requireAdmin` middleware to onboarding config routes and stale job cleanup endpoint
- **2FA userId leak:** Removed userId from 2FA challenge response (information disclosure)
- **Queue bugs:** Fixed cancelJob/refreshJobMatches not persisting state, clear button was no-op, reorder not restarting track, shuffle indices not updating on removeFromQueue
- **Infinite re-render:** Fixed useAlbumData error handling causing infinite re-render loop
- **2FA status not loading:** Fixed AccountSection not loading 2FA status on mount
- **Password change error key mismatch:** Fixed error key mismatch in AccountSection password change handler
- **Discovery polling leak:** Fixed polling never stopping on batch failure
- **Timer leak:** Fixed withTimeout not clearing timer in enrichment worker
- **Audio play rejection:** Fixed unhandled promise rejection on audio.play()
- **Library tab validation:** Added tab parameter validation in library page
- **Onboarding state:** Separated success/error state in onboarding page
- **Audio analysis race condition (#79):** CLAP analyzer was clobbering Essentia's `analysisStatus` field, causing completed tracks to be reset and permanently failed after 3 cycles; both Python analyzers now check for existing embeddings before resetting
- **Enrichment completion check:** `isFullyComplete` now includes CLAP vibe embeddings, not just audio analysis
- **Enrichment UI resilience:** Added `keepPreviousData` and loading/error states to enrichment progress query so the settings block doesn't vanish on failed refetch

### Performance

- **Recommendation N+1 queries:** Eliminated N+1 queries in all 3 recommendation endpoints (60+ queries down to 3-5)
- **Idle worker pool shutdown:** Essentia analyzer shuts down its 8-worker process pool (~5.6 GB) after idle period, lazily restarts when work arrives

### Changed

- **Shared utility consolidation:** Replaced 10 inline `formatDuration` copies with shared `formatTime`/`formatDuration`, extracted `formatNumber` to shared utility, consolidated inline Fisher-Yates shuffle with shared `shuffleArray`
- **Player hook extraction:** Extracted shared `useMediaInfo` hook, eliminating ~120 lines of duplicated media info logic across MiniPlayer, FullPlayer, and OverlayPlayer
- **Preview hook consolidation:** Consolidated artist/album preview hooks into shared `useTrackPreview`
- **Redundant logging cleanup:** Removed console.error calls redundant with toast notifications or re-thrown errors

### Removed

- Dead player files: VibeOverlay, VibeGraph, VibeOverlayContainer, enhanced-vibe-test page
- Dead code: trackEnrichment.ts, discover/types/index.ts, unused artist barrel file
- Unused exports: `playTrack` from useLibraryActions, `useTrackDisplayData`/`TrackDisplayData` from useMetadataDisplay
- Unused `streamLimiter` middleware
- Deprecated `radiosByGenre` from browse API (Deezer radio requires account; internal library radio used instead)

## [1.4.0] - 2026-02-05

### Performance

- **Sequential audio/vibe enrichment:** Vibe phase skips when audio analysis is still running, preventing concurrent CPU-intensive Python analyzers from competing for resources
- **Faster enrichment cycles:** Reduced cycle interval from 30s to 5s; the rate limiter already handles API throttling, making the extra delay redundant
- **GPU auto-detection (CLAP):** PyTorch-based CLAP vibe embeddings auto-detect and use GPU when available, falling back to CPU
- **GPU auto-detection (Essentia):** TensorFlow-based audio analysis detects GPU with memory growth enabled, with device logging on startup

### Changed

- **Enrichment orchestration simplified:** Replaced 4 phase functions with duplicated stop/pause handling with a generic `runPhase()` executor and `shouldHaltCycle()` helper

### Fixed

- **Docker frontend routing:** Fixed `NEXT_PUBLIC_BACKEND_URL` build-time env var in Dockerfile so the frontend correctly proxies API requests to the backend
- **Next.js rewrite proxy:** Updated rewrite config to use `NEXT_PUBLIC_BACKEND_URL` for consistent build-time/runtime behavior
- **False lite mode on startup:** Feature detection now checks for analyzer scripts on disk, preventing false "lite mode" display before analyzers send their first heartbeat
- **Removed playback error banner:** Removed the red error bar from all player components (FullPlayer, MiniPlayer, OverlayPlayer) that displayed raw Howler.js error codes
- **Enrichment failure notifications:** Replaced aggressive per-cycle error banner with a single notification through the notification system when enrichment completes with failures

## [1.3.9] - 2026-02-04

### Fixed

- **Audio analysis cleanup:** Fixed race condition in audio analysis cleanup that could reset tracks still being processed

## [1.3.8] - 2026-02-03

### Fixed

- **Enrichment:** CLAP queue and failure cleanup fixes for enrichment debug mode

## [1.3.7] - 2026-02-01

### Added

#### CLAP Audio Analyzer (Major Feature)

New ML-based audio analysis using CLAP (Contrastive Language-Audio Pretraining) embeddings for semantic audio understanding.

- **CLAP Analyzer Service:** Python-based analyzer using Microsoft's CLAP model for generating audio embeddings
- **pgvector Integration:** Added PostgreSQL vector extension for efficient similarity search on embeddings
- **Vibe Similarity:** "Find similar tracks" feature using hybrid similarity (CLAP embeddings + BPM/key matching)
- **Vibe Explorer UI:** Test page for exploring audio similarity at `/vibe-ui-test`
- **Settings Integration:** CLAP embeddings progress display and configurable worker count in Settings
- **Enrichment Phase 4:** CLAP embedding generation integrated into enrichment pipeline

#### Feature Detection

Automatic detection of available analyzers with graceful degradation.

- **Feature Detection Service:** Backend service that monitors analyzer availability via Redis heartbeats
- **Features API:** New `/api/system/features` endpoint exposes available features to frontend
- **FeaturesProvider:** React context for feature availability throughout the app
- **Graceful UI:** Vibe button hidden when embeddings unavailable; analyzer controls greyed out in Settings
- **Onboarding:** Shows detected features instead of manual toggles

#### Docker & Deployment

- **Lite Mode:** New `docker-compose.lite.yml` override for running without optional analyzers
- **All-in-One Image:** CLAP analyzer and pgvector included in main Docker image
- **Analyzer Profiles:** Optional services can be enabled/disabled via compose overrides

#### Other

- **Local Image Storage:** Artist images stored locally with artist counts
- **Hybrid Similarity Service:** Combines CLAP embeddings with BPM and musical key for better matches
- **BPM/Key Similarity Functions:** Database functions for musical attribute matching

### Fixed

- **CLAP Queue Name:** Corrected queue name to `audio:clap:queue`
- **CLAP Large Files:** Handle large audio files by chunking to avoid memory issues
- **CLAP Dependencies:** Added missing torchvision dependency and fixed model path
- **Embedding Index:** Added missing IVFFlat index to embedding migration for query performance
- **Library Page Performance:** Artist images now cache properly - removed JWT tokens from cover-art URLs that were breaking Service Worker and HTTP cache (tokens only added for CORS canvas access on detail pages)
- **Service Worker:** Increased image cache limit from 500 to 2000 entries for better coverage of large libraries

### Performance

- **CLAP Extraction:** Always extract middle 60s of audio for efficient embedding generation
- **CLAP Duration:** Pass duration from database to avoid file probe overhead
- **Vibe Query:** Use CTE to avoid duplicate embedding lookup in similarity queries
- **PopularArtistsGrid:** Added `memo()` wrapper to prevent unnecessary re-renders when parent state changes
- **FeaturedPlaylistsGrid:** Added `memo()` wrapper and `useCallback` for click handler to ensure child `PlaylistCard` memoization works correctly
- **Scan Reconciliation:** Fixed N+1 database query pattern - replaced per-job album lookups with single batched query, reducing ~250 queries to ~3 queries for 100 pending jobs

### Security

- **Vibe API:** Added internal auth to vibe failure endpoint

### Changed

- **Docker Profiles:** Replaced Docker profiles with override file approach for better compatibility
- **Mood Columns:** Marked as legacy in schema - may be derived from CLAP embeddings in future

## [1.3.5] - 2026-01-22

### Fixed

- **Audio preload:** Emit preload 'load' event asynchronously to prevent race condition during gapless playback

## [1.3.4] - 2026-01-22

### Added

- **Gapless playback:** Preload infrastructure and next-track preloading for seamless transitions
- **Infinite scroll:** Library artists, albums, and tracks now use infinite query pagination
- **CachedImage:** Migrated to Next.js Image component with proper type support

### Fixed

- **CSS hover performance:** Fixed hover state performance issues
- **Audio analyzer:** Fixed Enhanced mode detection
- **Onboarding:** Accessibility improvements
- **Audio format detection:** Simplified to prevent wrong decoder attempts
- **Audio cleanup:** Improved Howl instance cleanup to prevent memory leaks
- **Audio cleanup tracking:** Use Set for pending cleanup tracking
- **Redis connections:** Disconnect enrichmentStateService connections on shutdown

### Changed

- **Library page:** Optimized data fetching with tab-based queries and memoized delete handlers

## [1.3.3] - 2026-01-18

Comprehensive patch release addressing critical stability issues, performance improvements, and production readiness fixes. This release includes community-contributed fixes and extensive internal code quality improvements.

### Fixed

#### Critical (P1)

- **Docker:** PostgreSQL/Redis bind mount permission errors on Linux hosts ([#59](https://github.com/Chevron7Locked/kima-hub/issues/59)) - @arsaboo via [#62](https://github.com/Chevron7Locked/kima-hub/pull/62)
- **Audio Analyzer:** Memory consumption/OOM crashes with large libraries ([#21](https://github.com/Chevron7Locked/kima-hub/issues/21), [#26](https://github.com/Chevron7Locked/kima-hub/issues/26)) - @rustyricky via [#53](https://github.com/Chevron7Locked/kima-hub/pull/53)
- **LastFM:** ".map is not a function" crashes with obscure artists ([#37](https://github.com/Chevron7Locked/kima-hub/issues/37)) - @RustyJonez via [#39](https://github.com/Chevron7Locked/kima-hub/pull/39)
- **Wikidata:** 403 Forbidden errors from missing User-Agent header ([#57](https://github.com/Chevron7Locked/kima-hub/issues/57))
- **Downloads:** Singles directory creation race conditions ([#58](https://github.com/Chevron7Locked/kima-hub/issues/58))
- **Firefox:** FLAC playback stopping at ~4:34 mark on large files ([#42](https://github.com/Chevron7Locked/kima-hub/issues/42), [#17](https://github.com/Chevron7Locked/kima-hub/issues/17))
- **Downloads:** "Skip Track" fallback setting ignored, incorrectly falling back to Lidarr ([#68](https://github.com/Chevron7Locked/kima-hub/issues/68))
- **Auth:** Login "Internal Server Error" and "socket hang up" on NAS hardware ([#75](https://github.com/Chevron7Locked/kima-hub/issues/75))
- **Podcasts:** Seeking backward causing player crash and backend container hang
- **API:** Rate limiter crash with "trust proxy" validation error causing socket hang up
- **Downloads:** Duplicate download jobs created due to race condition (database-level locking fix)

#### Quality of Life (P2)

- **Desktop UI:** Added missing "Releases" link to desktop sidebar navigation ([#41](https://github.com/Chevron7Locked/kima-hub/issues/41))
- **iPhone:** Dynamic Island/notch overlapping TopBar buttons ([#54](https://github.com/Chevron7Locked/kima-hub/issues/54))
- **Album Discovery:** Cover Art Archive timeouts causing slow page loads (2s timeout added)
- **Wikimedia:** Image proxy 429 rate limiting due to incomplete User-Agent header

### Added

- **Selective Enrichment Controls:** Individual "Re-run" buttons for Artists, Mood Tags, and Audio Analysis in Settings
- **XSS Protection:** DOMPurify sanitization for artist biography HTML content
- **AbortController:** Proper fetch request cleanup on component unmount across all hooks

### Changed

- **Performance:** Removed on-demand image fetching from library endpoints (faster page loads)
- **Performance:** Added concurrency limit to Deezer preview fetching (prevents rate limiting)
- **Performance:** Corrected batching for on-demand artist image fetching
- **Soulseek:** Connection stability improvements with auto-disconnect on credential changes
- **Backend:** Production build now uses compiled JavaScript instead of tsx transpilation (faster startup, lower memory on NAS)

### Security

- **XSS Prevention:** Artist bios now sanitized with DOMPurify before rendering
- **Race Conditions:** Database-level locking prevents duplicate download job creation

### Technical Details

#### Community Fixes

- **Docker Permissions (#62):** Creates `/data/postgres` and `/data/redis` directories with proper ownership; validates write permissions at startup using `gosu <user> test -w`
- **Audio Analyzer Memory (#53):** TensorFlow GPU memory growth enabled; `MAX_ANALYZE_SECONDS` configurable (default 90s); explicit garbage collection in finally blocks
- **LastFM Normalization (#39):** `normalizeToArray()` utility wraps single-object API responses; protects 5 locations in artist discovery endpoints

#### Hotfixes

- **Wikidata User-Agent (#57):** All 4 API endpoints now use configured axios client with proper User-Agent header
- **Singles Directory (#58):** Replaced TOCTOU `existsSync()`+`mkdirSync()` pattern with idempotent `mkdir({recursive: true})`
- **Firefox FLAC (#42):** Replaced Express `res.sendFile()` with manual range request handling via `fs.createReadStream()` with proper `Content-Range` headers
- **Skip Track (#68):** Auto-fallback logic now only activates for undefined/null settings, respecting explicit "none" (Skip Track) preference
- **NAS Login (#75):** Backend now built with `tsc` and runs with `node dist/index.js`; proxy trust setting updated; session secret standardized
- **Podcast Seek:** AbortController cancels upstream requests on client disconnect; stream error handlers prevent crashes
- **Rate Limiter:** All rate limiter configurations disable proxy validation (`validate: { trustProxy: false }`)
- **Wikimedia Proxy:** User-Agent standardized to `"Lidify/1.0.0 (https://github.com/Chevron7Locked/kima-hub)"` across all external API calls

#### Production Readiness Improvements

Internal code quality and stability fixes discovered during production readiness review:

**Security:**
- ReDoS guard on `stripAlbumEdition()` regex (500 char input limit)
- Rate limiter path matching uses precise patterns instead of vulnerable `includes()` checks

**Race Conditions:**
- Spotify token refresh uses promise singleton pattern
- Import job state re-fetched after `checkImportCompletion()`
- useSoulseekSearch has cancellation flag pattern

**Memory Leaks:**
- failedUsers Map periodic cleanup (every 5 min)
- jobLoggers Map cleanup on all completion/failure paths

**Code Quality:**
- Async executor anti-pattern removed from Soulseek `searchTrack()`
- Timeout cleanup in catch blocks
- Proper error type narrowing (`catch (error: unknown)`)
- Null guards in artistNormalization functions
- Fisher-Yates shuffle replaces biased `Math.random()` sort
- Debug console.log statements removed/converted
- Empty catch blocks now have proper error handling
- Stale closures fixed with refs in event handlers
- Dead code and unused imports removed

**CSS:**
- Tailwind arbitrary value syntax corrected
- Duplicate z-index values removed

**Infrastructure:**
- Explicit database connection pool configuration
- Deezer album lookups routed through global rate limiter
- Consistent toast system usage

### Deferred to Future Release

- **PR #49** - Playlist visibility toggle (needs PR review)
- **PR #47** - Mood bucket tags (already implemented, verify and close)
- **PR #36** - Docker --user flag (needs security review)

### Contributors

Thanks to everyone who contributed to this release:

- @arsaboo - Docker bind mount permissions fix ([#62](https://github.com/Chevron7Locked/kima-hub/pull/62))
- @rustyricky - Audio analyzer memory limits ([#53](https://github.com/Chevron7Locked/kima-hub/pull/53))
- @RustyJonez - LastFM array normalization ([#39](https://github.com/Chevron7Locked/kima-hub/pull/39))
- @tombatossals - Testing and validation
- @zeknurn - Skip Track bug report ([#68](https://github.com/Chevron7Locked/kima-hub/issues/68))

---

## [1.3.2] - 2026-01-07

### Fixed
- Mobile scrolling blocked by pull-to-refresh component
- Pull-to-refresh component temporarily disabled (will be properly fixed in v1.4)

### Technical Details
- Root cause: CSS flex chain break (`h-full`) and touch event interference
- Implemented early return to bypass problematic wrapper while preserving child rendering
- TODO: Re-enable in v1.4 with proper CSS fix (`flex-1 flex flex-col min-h-0`)

## [1.3.1] - 2026-01-07

### Fixed
- Production database schema mismatch causing SystemSettings endpoints to fail
- Added missing `downloadSource` and `primaryFailureFallback` columns to SystemSettings table

### Database Migrations
- `20260107000000_add_download_source_columns` - Idempotent migration adds missing columns with defaults

### Technical Details
- Root cause: Migration gap between squashed init migration and production database setup
- Uses PostgreSQL IF NOT EXISTS pattern for safe deployment across all environments
- Default values: `downloadSource='soulseek'`, `primaryFailureFallback='none'`

## [1.3.0] - 2026-01-06

### Added

- Multi-source download system with configurable Soulseek/Lidarr primary source and fallback options
- Configurable enrichment speed control (1-5x concurrency) in Settings > Cache & Automation
- Stale job cleanup button in Settings to clear stuck Discovery batches and downloads
- Mobile touch drag support for seek sliders on all player views
- Skip +/-30s buttons for audiobooks/podcasts on mobile players
- iOS PWA media controls support (Control Center and Lock Screen)
- Artist name alias resolution via Last.fm (e.g., "of mice" -> "Of Mice & Men")
- Library grid now supports 8 columns on ultra-wide displays (2xl breakpoint)
- Artist discography sorting options (Year/Date Added)
- Enrichment failure notifications with retry/skip modal
- Download history deduplication to prevent duplicate entries
- Utility function for normalizing API responses to arrays (`normalizeToArray`) - @tombatossals
- Keyword-based mood scoring for standard analysis mode tracks - @RustyJonez
- Global and route-level error boundaries for better error handling
- React Strict Mode for development quality checks
- Next.js image optimization enabled by default
- Mobile-aware animation rendering (GalaxyBackground disables particles on mobile)
- Accessibility motion preferences support (`prefers-reduced-motion`)
- Lazy loading for heavy components (MoodMixer, VibeOverlay, MetadataEditor)
- Bundle analyzer tooling (`npm run analyze`)
- Loading states for all 10 priority routes
- Skip links for keyboard navigation (WCAG 2.1 AA compliance)
- ARIA attributes on all interactive controls and navigation elements
- Toast notifications with ARIA live regions for screen readers
- Bull Board admin dashboard authentication (requires admin user)
- Lidarr webhook signature verification with configurable secret
- Encryption key validation on startup (prevents insecure defaults)
- Session cookie security (httpOnly, sameSite=strict, secure in production)
- Swagger API documentation authentication in production
- JWT token expiration (24h access tokens, 30d refresh tokens)
- JWT refresh token endpoint (`/api/auth/refresh`)
- Token version validation (password changes invalidate existing tokens)
- Download queue reconciliation on server startup (marks stale jobs as failed)
- Redis batch operations for cache warmup (MULTI/EXEC pipelining)
- Memory-efficient database-level shuffle (`ORDER BY RANDOM() LIMIT n`)
- Dynamic import caching in queue cleaner (lazy-load pattern)
- Database index for `DownloadJob.targetMbid` field
- PWA install prompt dismissal persistence (7-day cooldown)

### Fixed

- **Critical:** Audio analyzer crashes on libraries with non-ASCII filenames ([#6](https://github.com/Chevron7Locked/kima-hub/issues/6))
- **Critical:** Audio analyzer BrokenProcessPool after ~1900 tracks ([#21](https://github.com/Chevron7Locked/kima-hub/issues/21))
- **Critical:** Audio analyzer OOM kills with aggressive worker auto-scaling ([#26](https://github.com/Chevron7Locked/kima-hub/issues/26))
- **Critical:** Audio analyzer model downloads and volume mount conflicts ([#2](https://github.com/Chevron7Locked/kima-hub/issues/2))
- Radio stations playing songs from wrong decades due to remaster dates ([#43](https://github.com/Chevron7Locked/kima-hub/issues/43))
- Manual metadata editing failing with 500 errors ([#9](https://github.com/Chevron7Locked/kima-hub/issues/9))
- Active downloads not resolving after Lidarr successfully imports ([#31](https://github.com/Chevron7Locked/kima-hub/issues/31))
- Discovery playlist downloads failing for artists with large catalogs ([#34](https://github.com/Chevron7Locked/kima-hub/issues/34))
- Discovery batches stuck in "downloading" status indefinitely
- Audio analyzer rhythm extraction failures on short/silent audio ([#13](https://github.com/Chevron7Locked/kima-hub/issues/13))
- "Of Mice & Men" artist name truncated to "Of Mice" during scanning
- Edition variant albums (Remastered, Deluxe) failing with "No releases available"
- Downloads stuck in "Lidarr #1" state for 5 minutes before failing
- Download duplicate prevention race condition causing 10+ duplicate jobs
- Lidarr downloads incorrectly cancelled during temporary network issues
- Discovery Weekly track durations showing "NaN:NaN"
- Artist name search ampersand handling ("Earth, Wind & Fire")
- Vibe overlay display issues on mobile devices
- Pagination scroll behavior (now scrolls to top instead of bottom)
- LastFM API crashes when receiving single objects instead of arrays ([#37](https://github.com/Chevron7Locked/kima-hub/issues/37)) - @tombatossals
- Mood bucket infinite loop for tracks analyzed in standard mode ([#40](https://github.com/Chevron7Locked/kima-hub/issues/40)) - @RustyJonez
- Playlist visibility toggle not properly syncing hide/show state - @tombatossals
- Audio player time display showing current time exceeding total duration (e.g., "58:00 / 54:34")
- Progress bar could exceed 100% for long-form media with stale metadata
- Enrichment P2025 errors when retrying enrichment for deleted entities
- Download settings fallback not resetting when changing primary source
- SeekSlider touch events bubbling to parent OverlayPlayer swipe handlers
- Audiobook/podcast position showing 0:00 after page refresh instead of saved progress
- Volume slider showing no visual fill indicator for current level
- PWA install prompt reappearing after user dismissal

### Changed

- Audio analyzer default workers reduced from auto-scale to 2 (memory conservative)
- Audio analyzer Docker memory limits: 6GB limit, 2GB reservation
- Download status polling intervals: 5s (active) / 10s (idle) / 30s (none), previously 15s
- Library pagination options changed to 24/40/80/200 (divisible by 8-column grid)
- Lidarr download failure detection now has 90-second grace period (3 checks)
- Lidarr catalog population timeout increased from 45s to 60s
- Download notifications now use API-driven state instead of local pending state
- Enrichment stop button now gracefully finishes current item before stopping
- Per-album enrichment triggers immediately instead of waiting for batch completion
- Lidarr edition variant detection now proactive (enables `anyReleaseOk` before first search)
- Discovery system now uses AcquisitionService for unified album/track acquisition
- Podcast and audiobook time display now shows time remaining instead of total duration
- Edition variant albums automatically fall back to base title search when edition-specific search fails
- Stale pending downloads cleaned up after 2 minutes (was indefinite)
- Download source detection now prioritizes actual service availability over user preference

### Removed

- Artist delete buttons hidden on mobile to prevent accidental deletion
- Audio analyzer models volume mount (shadowed built-in models)

### Database Migrations Required

```bash
# Run Prisma migrations
cd backend
npx prisma migrate deploy
```

**New Schema Fields:**

- `Album.originalYear` - Stores original release year (separate from remaster dates)
- `SystemSettings.enrichmentConcurrency` - User-configurable enrichment speed (1-5)
- `SystemSettings.downloadSource` - Primary download source selection
- `SystemSettings.primaryFailureFallback` - Fallback behavior on primary source failure
- `SystemSettings.lidarrWebhookSecret` - Shared secret for Lidarr webhook signature verification
- `User.tokenVersion` - Version number for JWT token invalidation on password change
- `DownloadJob.targetMbid` - Index added for improved query performance

**Backfill Script (Optional):**

```bash
# Backfill originalYear for existing albums
cd backend
npx ts-node scripts/backfill-original-year.ts
```

### Breaking Changes

- None - All changes are backward compatible

### Security

- **Critical:** Bull Board admin dashboard now requires authenticated admin user
- **Critical:** Lidarr webhooks verify signature/secret before processing requests
- **Critical:** Encryption key validation on startup prevents insecure defaults
- Session cookies use secure settings in production (httpOnly, sameSite=strict, secure)
- Swagger API documentation requires authentication in production (unless `DOCS_PUBLIC=true`)
- JWT tokens have proper expiration (24h access, 30d refresh) with refresh token support
- Password changes invalidate all existing tokens via tokenVersion increment
- Transaction-based download job creation prevents race conditions
- Enrichment stop control no longer bypassed by worker state
- Download queue webhook handlers use Serializable isolation transactions
- Webhook race conditions protected with exponential backoff retry logic

---

## Release Notes

When deploying this update:

1. **Backup your database** before running migrations
2. **Set required environment variable** (if not already set):
   ```bash
   # Generate secure encryption key
   SETTINGS_ENCRYPTION_KEY=$(openssl rand -base64 32)
   ```
3. Run `npx prisma migrate deploy` in the backend directory
4. Optionally run the originalYear backfill script for era mix accuracy:
   ```bash
   cd backend
   npx ts-node scripts/backfill-original-year.ts
   ```
5. Clear Docker volumes for audio-analyzer if experiencing model issues:
   ```bash
   docker volume rm lidify_audio_analyzer_models 2>/dev/null || true
   docker compose build audio-analyzer --no-cache
   ```
6. Review Settings > Downloads for new multi-source download options
7. Review Settings > Cache for new enrichment speed control
8. Configure Lidarr webhook secret in Settings for webhook signature verification (recommended)
9. Review Settings > Security for JWT token settings

### Known Issues

- Pre-existing TypeScript errors in spotifyImport.ts matchTrack method (unrelated to this release)
- Simon & Garfunkel artist name may be truncated due to short second part (edge case, not blocking)

### Contributors

Big thanks to everyone who contributed, tested, and helped make this release happen:

- @tombatossals - LastFM API normalization utility ([#39](https://github.com/Chevron7Locked/kima-hub/pull/39)), playlist visibility toggle fix ([#49](https://github.com/Chevron7Locked/kima-hub/pull/49))
- @RustyJonez - Mood bucket standard mode keyword scoring ([#47](https://github.com/Chevron7Locked/kima-hub/pull/47))
- @iamiq - Audio analyzer crash reporting ([#2](https://github.com/Chevron7Locked/kima-hub/issues/2))
- @volcs0 - Memory pressure testing ([#26](https://github.com/Chevron7Locked/kima-hub/issues/26))
- @Osiriz - Long-running analysis testing ([#21](https://github.com/Chevron7Locked/kima-hub/issues/21))
- @hessonam - Non-ASCII character testing ([#6](https://github.com/Chevron7Locked/kima-hub/issues/6))
- @niles - RhythmExtractor edge case reporting ([#13](https://github.com/Chevron7Locked/kima-hub/issues/13))
- @TheChrisK - Metadata editor bug reporting ([#9](https://github.com/Chevron7Locked/kima-hub/issues/9))
- @lizar93 - Discovery playlist testing ([#34](https://github.com/Chevron7Locked/kima-hub/issues/34))
- @brokenglasszero - Mood tags feature verification ([#35](https://github.com/Chevron7Locked/kima-hub/issues/35))

And all users who reported bugs, tested fixes, and provided feedback!

---

For detailed technical implementation notes, see [docs/PENDING_DEPLOY-2.md](docs/PENDING_DEPLOY-2.md).


## /CONTRIBUTING.md

# Contributing to Kima

First off, thanks for taking the time to contribute! 🎉

## Getting Started

1. Fork the repository
2. Clone your fork locally
3. Set up the development environment (see README.md)
4. Create a new branch from `main` for your changes

## Branch Strategy

All development happens on the `main` branch:

-   **All PRs should target `main`**
-   Every push to `main` triggers a nightly Docker build
-   Stable releases are created via version tags

## Making Contributions

### Bug Fixes

1. Check existing issues to see if the bug has been reported
2. If not, open a bug report issue first
3. Fork, branch, fix, and submit a PR referencing the issue

### Small Enhancements

1. Open a feature request issue to discuss first
2. Keep changes focused and minimal

### Large Features

Please open an issue to discuss before starting work.

## Code Style

### Frontend

The frontend uses ESLint. Before submitting a PR:

```bash
cd frontend
npm run lint
```

### Backend

Follow existing code patterns and TypeScript conventions.

## Pull Request Process

1. **Target the `main` branch**
2. Fill out the PR template completely
3. Ensure the Docker build check passes
4. Wait for review - we'll provide feedback or approve

## Questions?

Open a Discussion thread for questions that aren't bugs or feature requests.

Thanks for contributing!


## /Dockerfile

``` path="/Dockerfile" 
# Kima All-in-One Docker Image (Hardened)
# Contains: Backend, Frontend, PostgreSQL, Redis, Audio Analyzer (Essentia AI)
# Usage: docker run -d -p 3030:3030 -v /path/to/music:/music kima/kima

FROM node:22-slim

# Add PostgreSQL 16 repository (Debian Bookworm only has PG15 by default).
# curl/gnupg are needed here only to fetch the repo signing key; curl is
# deliberately removed again in the security-hardening step near the end
# of this Dockerfile.
RUN apt-get update && apt-get install -y --no-install-recommends \
    gnupg lsb-release curl ca-certificates && \
    echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \
    curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \
    apt-get update

# Install system dependencies including Python for audio analysis
RUN apt-get install -y --no-install-recommends \
    postgresql-16 \
    postgresql-contrib-16 \
    postgresql-16-pgvector \
    redis-server \
    # supervisor: process manager running all services in this one container
    supervisor \
    ffmpeg \
    tini \
    openssl \
    bash \
    # gosu: lets root-owned startup scripts run commands as postgres/redis
    gosu \
    # Python for audio analyzer
    python3 \
    python3-pip \
    python3-numpy \
    # Build tools (needed for some Python packages)
    build-essential \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*

# Create directories for app code, service data, sockets, and logs.
# postgres must own its data and socket directories before initdb runs
# at first container start.
RUN mkdir -p /app/backend /app/frontend /app/audio-analyzer /app/models \
    /data/postgres /data/redis /run/postgresql /var/log/supervisor \
    && chown -R postgres:postgres /data/postgres /run/postgresql

# ============================================
# AUDIO ANALYZER SETUP (Essentia AI)
# ============================================
WORKDIR /app/audio-analyzer

# Core Python dependencies -- must succeed on all architectures (AMD64 + ARM64)
# --break-system-packages: install into Debian's externally-managed system
# Python (PEP 668); acceptable here because the container ships one dedicated
# environment. --no-cache-dir keeps wheel caches out of the image layer.
RUN pip3 install --no-cache-dir --break-system-packages \
    redis \
    psycopg2-binary \
    'pgvector>=0.2.0' \
    'python-dotenv>=1.0.0' \
    'requests>=2.31.0' \
    'bullmq==2.19.5' \
    'yt-dlp>=2024.12.0'

# ML dependencies -- torch/torchaudio for CLAP, tensorflow/essentia for MusiCNN
# CPU-only torch: install first via the CPU index so downstream packages
# (laion-clap, transformers) reuse the already-installed CPU wheels.
# tensorflow-cpu + essentia-tensorflow: no Linux ARM64 wheels exist upstream,
# so MusiCNN audio analysis is unavailable on ARM64. CLAP still works.
#
# Pins omit the +cpu local version suffix so the same spec resolves on both
# architectures. On amd64 the CPU index serves torch==2.5.1+cpu and PEP 440
# matches without the local tag; on arm64 the same index serves torch==2.5.1
# without any local tag -- the +cpu suffix only appears from torch 2.6.0+.
# All three packages must be pinned together so pip resolves a compatible
# set; unpinning torchaudio causes it to drift to newer versions with
# mismatched torch ABI (the cause of #165 in v1.7.9).
RUN pip3 install --no-cache-dir --break-system-packages \
    --index-url https://download.pytorch.org/whl/cpu \
    'torch==2.5.1' \
    'torchaudio==2.5.1' \
    'torchvision==0.20.1' \
    && pip3 install --no-cache-dir --break-system-packages \
    'laion-clap>=1.1.4' \
    'librosa>=0.10.0' \
    'transformers>=4.30.0'

# tensorflow-cpu + essentia-tensorflow (AMD64 only -- no ARM64 wheels upstream)
# The trailing "|| echo" keeps ARM64 builds green when these wheels are
# missing; essentia-tensorflow is installed with --no-deps so it cannot pull
# in a conflicting tensorflow build.
RUN pip3 install --no-cache-dir --break-system-packages \
    'tensorflow-cpu>=2.13.0,<2.14.0' \
    && pip3 install --no-cache-dir --break-system-packages --no-deps \
    essentia-tensorflow \
    || echo "[ARM64] tensorflow-cpu/essentia-tensorflow unavailable -- MusiCNN analysis disabled"

# Keep scipy/pandas aligned with tensorflow's numpy constraint in the shared Python env.
# Force exact wheel versions to avoid resolver drift leaving incompatible pandas/scipy.
# Uninstall first (best-effort) so --force-reinstall lays down exactly these wheels.
RUN pip3 uninstall -y pandas scipy numpy || true \
    && pip3 install --no-cache-dir --break-system-packages --force-reinstall \
    'numpy==1.24.4' \
    'scipy==1.10.1' \
    'pandas==2.0.3'

# Fail fast during build if CLAP/Transformers dependency resolution regresses.
RUN python3 -c "import numpy, scipy, pandas, torch, torchaudio, laion_clap; from transformers import BertModel; print(f'CLAP deps OK: torch={torch.__version__} torchaudio={torchaudio.__version__} numpy={numpy.__version__} scipy={scipy.__version__} pandas={pandas.__version__}')"

# Cleanup: drop pip's wheel cache and compiled Python bytecode to shrink the
# image. Every install above used --no-cache-dir, so the purge is a safety
# net; the trailing "|| true" keeps the build green if nothing is removable.
# Uses pip3 for consistency with every other pip invocation in this file.
RUN pip3 cache purge \
    && find /usr -name "*.pyc" -delete \
    && find /usr -name "__pycache__" -type d -exec rm -rf {} + 2>/dev/null || true

# Download all ML models in a single layer (~800MB total)
# IMPORTANT: Using MusiCNN models to match analyzer.py expectations
# Each download retries 3 times with bounded connect/total timeouts so a
# flaky mirror fails the build quickly instead of hanging it.
RUN echo "Downloading ML models..." && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/autotagging/msd/msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/mood_happy-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/mood_happy/mood_happy-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/mood_sad-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/mood_sad/mood_sad-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/mood_relaxed-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/mood_relaxed/mood_relaxed-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/mood_aggressive-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/mood_aggressive/mood_aggressive-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/mood_party-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/mood_party/mood_party-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/mood_acoustic-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/mood_acoustic/mood_acoustic-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/mood_electronic-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/mood_electronic/mood_electronic-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/danceability-msd-musicnn-1.pb \
        "https://essentia.upf.edu/models/classification-heads/danceability/danceability-msd-musicnn-1.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/deam-msd-musicnn-2.pb \
        "https://essentia.upf.edu/models/classification-heads/deam/deam-msd-musicnn-2.pb" && \
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 300 -o /app/models/emomusic-msd-musicnn-2.pb \
        "https://essentia.upf.edu/models/classification-heads/emomusic/emomusic-msd-musicnn-2.pb" && \
    # CLAP checkpoint: download the full training checkpoint to /tmp, then
    # re-save only its state_dict (dropping other checkpoint keys) before
    # deleting the original to keep the layer smaller.
    curl -L --retry 3 --retry-delay 5 --connect-timeout 30 --max-time 600 -o /tmp/clap_full.pt \
        "https://huggingface.co/lukewys/laion_clap/resolve/main/music_audioset_epoch_15_esc_90.14.pt" && \
    python3 -c "import torch; ckpt = torch.load('/tmp/clap_full.pt', map_location='cpu', weights_only=False); torch.save({'state_dict': ckpt['state_dict']}, '/app/models/music_audioset_epoch_15_esc_90.14.pt')" && \
    rm /tmp/clap_full.pt && \
    echo "All ML models downloaded" && \
    ls -lh /app/models/

# Copy audio analyzer scripts
COPY services/audio-analyzer/analyzer.py /app/audio-analyzer/

# ============================================
# CLAP ANALYZER SETUP (Vibe Similarity)
# ============================================
WORKDIR /app/audio-analyzer-clap

# Copy CLAP analyzer script (runs in the same shared Python env installed above)
COPY services/audio-analyzer-clap/analyzer.py /app/audio-analyzer-clap/

# Create database readiness check script.
# Supervisor uses this to gate backend and audio-analyzer startup: it waits
# until Redis answers PING, then until the Prisma-managed schema exists
# (probed by selecting from the "Track" table). Takes an optional timeout
# in seconds as $1 (default 120) and exits non-zero on timeout.
RUN cat > /app/wait-for-db.sh << 'EOF'
#!/bin/bash
TIMEOUT=${1:-120}
COUNTER=0

echo "[wait-for-db] Waiting for Redis and database schema (timeout: ${TIMEOUT}s)..."

# Wait for Redis to finish loading
echo "[wait-for-db] Checking Redis readiness..."
REDIS_COUNTER=0
while [ $REDIS_COUNTER -lt $TIMEOUT ]; do
    if redis-cli -h localhost ping 2>/dev/null | grep -q PONG; then
        echo "[wait-for-db] ✓ Redis is ready!"
        break
    fi
    sleep 1
    REDIS_COUNTER=$((REDIS_COUNTER + 1))
done

if [ $REDIS_COUNTER -ge $TIMEOUT ]; then
    echo "[wait-for-db] ERROR: Redis not ready after ${TIMEOUT}s"
    exit 1
fi

# Quick check for schema ready flag
if [ -f /data/.schema_ready ]; then
    echo "[wait-for-db] Schema ready flag found, verifying connection..."
fi

while [ $COUNTER -lt $TIMEOUT ]; do
    if PGPASSWORD=kima psql -h localhost -U kima -d kima -c "SELECT 1 FROM \"Track\" LIMIT 1" > /dev/null 2>&1; then
        echo "[wait-for-db] ✓ Database is ready and schema exists!"
        exit 0
    fi
    
    if [ $((COUNTER % 15)) -eq 0 ]; then
        echo "[wait-for-db] Still waiting... (${COUNTER}s elapsed)"
    fi
    
    sleep 1
    COUNTER=$((COUNTER + 1))
done

echo "[wait-for-db] ERROR: Database schema not ready after ${TIMEOUT}s"
echo "[wait-for-db] Listing available tables:"
PGPASSWORD=kima psql -h localhost -U kima -d kima -c "\dt" 2>&1 || echo "Could not list tables"
exit 1
EOF

# Make the script executable and strip Windows CRLF line endings so it runs
# regardless of how the repo was checked out
RUN chmod +x /app/wait-for-db.sh && \
    sed -i 's/\r$//' /app/wait-for-db.sh

# ============================================
# BACKEND BUILD
# ============================================
WORKDIR /app/backend

# Copy backend package files and install dependencies
COPY backend/package*.json ./
COPY backend/prisma ./prisma/
# Sanity check: "ls" fails the build loudly if the migrations directory
# was not copied into the image
RUN echo "=== Migrations copied ===" && ls -la prisma/migrations/ && echo "=== End migrations ==="
RUN npm ci && npm cache clean --force
RUN npx prisma generate

# Copy backend source and build
COPY backend/src ./src
COPY backend/tsconfig.json ./
# Compile, then drop dev dependencies and TypeScript sources to keep the
# final image lean -- only dist/ is needed at runtime
RUN npm run build && \
    npm prune --production && \
    rm -rf src tests __tests__ tsconfig*.json

COPY backend/docker-entrypoint.sh ./
COPY backend/healthcheck.js ./healthcheck-backend.js

# Create log directory (cache will be in /data volume)
RUN mkdir -p /app/backend/logs

# ============================================
# FRONTEND BUILD
# ============================================
WORKDIR /app/frontend

# Copy frontend package files and install dependencies
COPY frontend/package*.json ./
RUN npm ci && npm cache clean --force

# Copy frontend source and build
COPY frontend/ ./

# Build Next.js (production)
# MALLOC_ARENA_MAX=1 reduces mmap arena churn during build.
# Build needs 2GB for tsc; runtime stays at 512MB (set in supervisor config).
# NOTE: NEXT_PUBLIC_* values are inlined into the client bundle at build
# time, so this backend URL is baked in here and cannot change at runtime.
ENV NEXT_PUBLIC_BACKEND_URL=http://127.0.0.1:3006
RUN MALLOC_ARENA_MAX=1 NODE_OPTIONS="--max-old-space-size=2048" npm run build

# ============================================
# SECURITY HARDENING
# ============================================
# Remove dangerous tools and build dependencies AFTER all builds are complete
# Keep: bash (supervisor), gosu (postgres user switching), python3 (audio analyzer)
# Every removal is individually best-effort ("2>/dev/null || true") so
# hardening never fails the build when a binary is already absent.
RUN apt-get purge -y --auto-remove build-essential python3-dev 2>/dev/null || true && \
    rm -f /usr/bin/wget /bin/wget 2>/dev/null || true && \
    rm -f /usr/bin/curl /bin/curl 2>/dev/null || true && \
    rm -f /usr/bin/nc /bin/nc /usr/bin/ncat /usr/bin/netcat 2>/dev/null || true && \
    rm -f /usr/bin/ftp /usr/bin/tftp /usr/bin/telnet 2>/dev/null || true && \
    rm -rf /var/lib/apt/lists/*

# ============================================
# CONFIGURATION
# ============================================
WORKDIR /app

# Copy healthcheck script
COPY healthcheck-prod.js /app/healthcheck.js

# Create supervisord config - logs to stdout/stderr for Docker visibility.
# Startup ordering is driven by priority: postgres (10) -> redis (20) ->
# backend (30) -> frontend (40) -> audio analyzers (50/60). The backend and
# both analyzers additionally block on /app/wait-for-db.sh so they only
# start once Redis and the database schema are reachable.
RUN cat > /etc/supervisor/conf.d/kima.conf << 'EOF'
[supervisord]
nodaemon=true
logfile=/dev/null
logfile_maxbytes=0
pidfile=/var/run/supervisord.pid
user=root

[program:postgres]
command=/usr/lib/postgresql/16/bin/postgres -D /data/postgres
user=postgres
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=10

[program:redis]
command=/usr/bin/redis-server --dir /data/redis --appendonly yes
user=redis
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=20

[program:backend]
command=/bin/bash -c "/app/wait-for-db.sh 120 && cd /app/backend && node dist/index.js"
autostart=true
autorestart=true
startretries=3
startsecs=10
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
directory=/app/backend
priority=30

[program:frontend]
command=/bin/bash -c "sleep 10 && cd /app/frontend && npm start"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
environment=NODE_ENV="production",BACKEND_URL="http://localhost:3006",PORT="3030",MALLOC_ARENA_MAX="1",NODE_OPTIONS="--max-old-space-size=512"
priority=40

[program:audio-analyzer]
command=/bin/bash -c "/app/wait-for-db.sh 120 && cd /app/audio-analyzer && python3 analyzer.py"
autostart=true
autorestart=true
startretries=3
startsecs=10
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
environment=DATABASE_URL="postgresql://kima:kima@localhost:5432/kima",REDIS_URL="redis://localhost:6379",MUSIC_PATH="/music",BATCH_SIZE="10",SLEEP_INTERVAL="5",MAX_ANALYZE_SECONDS="90",BRPOP_TIMEOUT="5",MODEL_IDLE_TIMEOUT="300",NUM_WORKERS="2",THREADS_PER_WORKER="1"
priority=50

[program:audio-analyzer-clap]
command=/bin/bash -c "/app/wait-for-db.sh 120 && cd /app/audio-analyzer-clap && python3 analyzer.py"
autostart=true
autorestart=true
startretries=3
startsecs=30
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
environment=DATABASE_URL="postgresql://kima:kima@localhost:5432/kima",REDIS_URL="redis://localhost:6379",MUSIC_PATH="/music",BACKEND_URL="http://localhost:3006",SLEEP_INTERVAL="5",NUM_WORKERS="1",MODEL_IDLE_TIMEOUT="300",INTERNAL_API_SECRET="kima-internal-aio"
priority=60
EOF

# Fix Windows line endings in supervisor config
RUN sed -i 's/\r$//' /etc/supervisor/conf.d/kima.conf

# Create startup script with root check
RUN cat > /app/start.sh << 'EOF'
#!/bin/bash
set -e

# Security check: Warn if running internal services as root
# Note: This container runs multiple services, some require root for initial setup
# but individual services (postgres, backend processes) run as non-root users

echo ""
echo "============================================================"
echo "  Kima - Premium Self-Hosted Music Server"
echo ""
echo "  Features:"
echo "    - AI-Powered Vibe Matching (Essentia ML)"
echo "    - Smart Playlists & Mood Detection"
echo "    - High-Quality Audio Streaming"
echo ""
echo "  Security:"
echo "    - Hardened container (no wget/curl/nc)"
echo "    - Auto-generated encryption keys"
echo "============================================================"
echo ""

# Find PostgreSQL binaries (version may vary)
PG_BIN=$(find /usr/lib/postgresql -name "bin" -type d | head -1)
if [ -z "$PG_BIN" ]; then
    echo "ERROR: PostgreSQL binaries not found!"
    exit 1
fi
echo "Using PostgreSQL from: $PG_BIN"

# Prepare data directories (bind-mount safe)
echo "Preparing data directories..."
mkdir -p /data/postgres /data/redis /run/postgresql

if id postgres >/dev/null 2>&1; then
    chown -R postgres:postgres /data/postgres /run/postgresql 2>/dev/null || true
    chmod 700 /data/postgres 2>/dev/null || true
    if ! gosu postgres test -w /data/postgres; then
        POSTGRES_UID=$(id -u postgres)
        POSTGRES_GID=$(id -g postgres)
        echo "ERROR: /data/postgres is not writable by postgres (${POSTGRES_UID}:${POSTGRES_GID})."
        echo "If you bind-mount /data, ensure the host path is writable by that UID/GID."
        exit 1
    fi
fi

if id redis >/dev/null 2>&1; then
    chown -R redis:redis /data/redis 2>/dev/null || true
    chmod 700 /data/redis 2>/dev/null || true
    if ! gosu redis test -w /data/redis; then
        REDIS_UID=$(id -u redis)
        REDIS_GID=$(id -g redis)
        echo "ERROR: /data/redis is not writable by redis (${REDIS_UID}:${REDIS_GID})."
        echo "If you bind-mount /data, ensure the host path is writable by that UID/GID."
        exit 1
    fi
fi

# Clean up stale PID file if exists
rm -f /data/postgres/postmaster.pid 2>/dev/null || true

# Initialize PostgreSQL if not already done
if [ ! -f /data/postgres/PG_VERSION ]; then
    echo "Initializing PostgreSQL database..."
    gosu postgres $PG_BIN/initdb -D /data/postgres

    # Configure PostgreSQL
    echo "host all all 0.0.0.0/0 md5" >> /data/postgres/pg_hba.conf
    echo "listen_addresses='*'" >> /data/postgres/postgresql.conf
fi

# Start PostgreSQL temporarily to create database and user
gosu postgres $PG_BIN/pg_ctl -D /data/postgres -w start

# Migrate from Lidify -> Kima: rename old database and user if they exist
if gosu postgres psql -tc "SELECT 1 FROM pg_database WHERE datname = 'lidify'" | grep -q 1; then
    echo "Found legacy 'lidify' database, migrating to 'kima'..."
    # Terminate any connections to the old database
    gosu postgres psql -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = 'lidify' AND pid <> pg_backend_pid();" 2>/dev/null || true
    # Rename the database
    gosu postgres psql -c "ALTER DATABASE lidify RENAME TO kima;"
    echo "✓ Database renamed: lidify -> kima"
    # Rename the user if it exists
    if gosu postgres psql -tc "SELECT 1 FROM pg_roles WHERE rolname = 'lidify'" | grep -q 1; then
        gosu postgres psql -c "ALTER USER lidify RENAME TO kima;"
        gosu postgres psql -c "ALTER USER kima WITH PASSWORD 'kima';"
        echo "✓ User renamed: lidify -> kima"
    fi
fi

# Create user and database if they don't exist (fresh install)
gosu postgres psql -tc "SELECT 1 FROM pg_roles WHERE rolname = 'kima'" | grep -q 1 || \
    gosu postgres psql -c "CREATE USER kima WITH PASSWORD 'kima';"
gosu postgres psql -tc "SELECT 1 FROM pg_database WHERE datname = 'kima'" | grep -q 1 || \
    gosu postgres psql -c "CREATE DATABASE kima OWNER kima;"

# Create pgvector extension as superuser (required before migrations)
echo "Creating pgvector extension..."
gosu postgres psql -d kima -c "CREATE EXTENSION IF NOT EXISTS vector;"

# --- Prisma migration pre-checks ------------------------------------------
# Decide which migration path to take by inspecting the database state.
cd /app/backend
export DATABASE_URL="postgresql://kima:kima@localhost:5432/kima"
echo "Running Prisma migrations..."
ls -la prisma/migrations/ || echo "No migrations directory!"

# Check if _prisma_migrations table exists (indicates previous Prisma setup).
# Restricted to the public schema for consistency with the User-table probe
# below, so a same-named table in another schema cannot skew detection.
# Falls back to "f" (false) if psql itself fails.
MIGRATIONS_EXIST=$(gosu postgres psql -d kima -tAc "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema = 'public' AND table_name = '_prisma_migrations')" 2>/dev/null || echo "f")

# Check if User table exists (indicates existing data)
USER_TABLE_EXIST=$(gosu postgres psql -d kima -tAc "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'User')" 2>/dev/null || echo "f")

# Handle rename migration for existing databases: if the pre-rename column
# is still present, record the rename migration as already applied so
# migrate deploy does not attempt to re-run it.
echo "Checking if rename migration needs to be marked as applied..."
if gosu postgres psql -d kima -tAc "SELECT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name='SystemSettings' AND column_name='soulseekFallback');" 2>/dev/null | grep -q 't'; then
    echo "Old column exists, marking migration as applied..."
    gosu postgres psql -d kima -c "INSERT INTO \"_prisma_migrations\" (id, checksum, finished_at, migration_name, logs, rolled_back_at, started_at, applied_steps_count) VALUES (gen_random_uuid(), '', NOW(), '20250101000000_rename_soulseek_fallback', '', NULL, NOW(), 1) ON CONFLICT DO NOTHING;" 2>/dev/null || true
fi

# Three mutually exclusive migration paths, chosen from the probes above:
#   1. migration history present   -> plain migrate deploy
#   2. data but no history         -> baseline init migration, then deploy
#   3. neither                     -> fresh install, plain deploy
if [ "$MIGRATIONS_EXIST" = "t" ]; then
    # Normal migration flow - migrations table exists
    echo "Migration history found, running migrate deploy..."
    if ! npx prisma migrate deploy 2>&1; then
        echo "FATAL: Database migration failed! Check logs above."
        exit 1
    fi
elif [ "$USER_TABLE_EXIST" = "t" ]; then
    # Database has data but no migrations table - needs baseline
    echo "Existing database detected without migration history."
    echo "Creating baseline from current schema..."
    # Mark the init migration as already applied (baseline). Errors are
    # ignored, presumably for the already-recorded case -- TODO confirm.
    npx prisma migrate resolve --applied 20241130000000_init 2>&1 || true
    # Now run any subsequent migrations
    if ! npx prisma migrate deploy 2>&1; then
        echo "FATAL: Migration after baseline failed!"
        exit 1
    fi
else
    # Fresh database - run migrations normally
    echo "Fresh database detected, running initial migrations..."
    if ! npx prisma migrate deploy 2>&1; then
        echo "FATAL: Initial migration failed. Check database connection and schema."
        exit 1
    fi
fi
echo "✓ Migrations completed successfully"

# Verify schema exists before starting services: probe a core table so a
# silently incomplete migration aborts the container instead of letting the
# services start against a broken schema.
echo "Verifying database schema..."
if ! gosu postgres psql -d kima -c "SELECT 1 FROM \"Track\" LIMIT 1" >/dev/null 2>&1; then
    echo "FATAL: Track table does not exist after migration!"
    echo "Database schema verification failed. Container will exit."
    exit 1
fi
echo "✓ Schema verification passed"

# Create flag file for wait-for-db.sh so dependent services know the schema
# is ready.
touch /data/.schema_ready
echo "✓ Schema ready flag created"

# Stop PostgreSQL (supervisord will start it)
gosu postgres $PG_BIN/pg_ctl -D /data/postgres -w stop

# Create persistent cache directories in /data volume
mkdir -p /data/cache/covers /data/cache/transcodes /data/secrets

# Load or generate persistent secrets. Persisting them under /data means
# sessions and encrypted settings survive container recreation.
if [ -f /data/secrets/session_secret ]; then
    SESSION_SECRET=$(cat /data/secrets/session_secret)
    echo "Loaded existing SESSION_SECRET"
else
    # NOTE(review): the secret file briefly exists with default permissions
    # before chmod 600 -- consider `umask 077` first; confirm whether this
    # matters inside the container.
    SESSION_SECRET=$(openssl rand -hex 32)
    echo "$SESSION_SECRET" > /data/secrets/session_secret
    chmod 600 /data/secrets/session_secret
    echo "Generated and saved new SESSION_SECRET"
fi

if [ -f /data/secrets/encryption_key ]; then
    SETTINGS_ENCRYPTION_KEY=$(cat /data/secrets/encryption_key)
    echo "Loaded existing SETTINGS_ENCRYPTION_KEY"
else
    SETTINGS_ENCRYPTION_KEY=$(openssl rand -hex 32)
    echo "$SETTINGS_ENCRYPTION_KEY" > /data/secrets/encryption_key
    chmod 600 /data/secrets/encryption_key
    echo "Generated and saved new SETTINGS_ENCRYPTION_KEY"
fi

# Write environment file for backend.
# NOTE(review): INTERNAL_API_SECRET is a hard-coded constant -- acceptable
# only if it is used strictly for intra-container traffic; confirm it is
# never reachable from outside.
cat > /app/backend/.env << ENVEOF
NODE_ENV=production
DATABASE_URL=postgresql://kima:kima@localhost:5432/kima
REDIS_URL=redis://localhost:6379
PORT=3006
MUSIC_PATH=/music
TRANSCODE_CACHE_PATH=/data/cache/transcodes
SESSION_SECRET=$SESSION_SECRET
SETTINGS_ENCRYPTION_KEY=$SETTINGS_ENCRYPTION_KEY
INTERNAL_API_SECRET=kima-internal-aio
DISABLE_CLAP=${DISABLE_CLAP:-}
ENVEOF

# Optionally disable CLAP audio analyzer (for low-memory deployments).
# Accepts either DISABLE_CLAP=true or DISABLE_CLAP=1.
if [ "${DISABLE_CLAP:-false}" = "true" ] || [ "${DISABLE_CLAP:-0}" = "1" ]; then
    python3 -c "
import re
# Flip autostart to false within the audio-analyzer-clap program section
# only; the character class stops matching at the next section header.
conf = open('/etc/supervisor/conf.d/kima.conf').read()
conf = re.sub(
    r'(\[program:audio-analyzer-clap\][^\[]*autostart=)true',
    r'\g<1>false',
    conf,
    flags=re.DOTALL
)
open('/etc/supervisor/conf.d/kima.conf', 'w').write(conf)
"
    echo "CLAP audio analyzer disabled (DISABLE_CLAP=${DISABLE_CLAP})"
fi

echo "Starting Kima..."
# Hand off to supervisord via exec so this shell is replaced and signals
# from the container init reach supervisord directly. The secrets are also
# passed through the environment in addition to the backend .env file.
exec env \
    NODE_ENV=production \
    DATABASE_URL="postgresql://kima:kima@localhost:5432/kima" \
    SESSION_SECRET="$SESSION_SECRET" \
    SETTINGS_ENCRYPTION_KEY="$SETTINGS_ENCRYPTION_KEY" \
    /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
EOF

# Fix Windows line endings (CRLF -> LF) and make executable, in case the
# script was authored or checked out on Windows.
RUN sed -i 's/\r$//' /app/start.sh && chmod +x /app/start.sh

# Expose ports
EXPOSE 3030

# Health check using Node.js (no wget); generous start period to cover
# database initialization and migrations on first boot.
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD ["node", "/app/healthcheck.js"]

# Volumes: music library (read source) and persistent app data
VOLUME ["/music", "/data"]

# Use tini for proper signal handling; it forwards signals to start.sh's
# exec'd supervisord and reaps zombies.
ENTRYPOINT ["/usr/bin/tini", "--"]
CMD ["/app/start.sh"]

```

## /README.md

# kima-hub

[![Docker Image](https://img.shields.io/docker/v/chevron7locked/kima?label=Docker&sort=semver)](https://hub.docker.com/r/chevron7locked/kima)
[![GitHub Release](https://img.shields.io/github/v/release/Chevron7Locked/kima-hub?label=Release)](https://github.com/Chevron7Locked/kima-hub/releases)
[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)

A self-hosted, on-demand audio streaming platform that brings the Spotify experience to your personal music library.

Kima is built for music lovers who want the convenience of streaming services without sacrificing ownership of their library. Point it at your music collection, and Kima handles the rest: artist discovery, personalized playlists, podcast subscriptions, and seamless integration with tools you already use like Lidarr and Audiobookshelf.

![Kima Home Screen](assets/screenshots/desktop-home.png)

---

## A Note on Native Apps

Once the core experience is solid and properly tested, a native mobile app (likely React Native) is on the roadmap. For now, the PWA covers most use cases well.

Thanks for your patience while I work through this.

---

## Table of Contents

-   [Features](#features)
    -   [The Vibe System](#the-vibe-system)
    -   [Playlist Import](#playlist-import)
-   [Mobile Support](#mobile-support)
-   [Quick Start](#quick-start)
-   [Configuration](#configuration)
-   [CLAP Audio Analysis](#clap-audio-analysis)
-   [GPU Acceleration](#gpu-acceleration)
-   [Integrations](#integrations)
    -   [Native Apps (Subsonic)](#native-apps-subsonic)
-   [Using Kima](#using-kima)
    -   [Using the Vibe System](#using-the-vibe-system)
-   [Administration](#administration)
-   [Architecture](#architecture)
-   [Roadmap](#roadmap)
-   [License](#license)
-   [Acknowledgments](#acknowledgments)

---

## Features

### Your Music, Your Way

-   **Stream your library** - FLAC, MP3, AAC, OGG, and other common formats work out of the box
-   **Automatic cataloging** - Kima scans your library and enriches it with metadata from MusicBrainz and Last.fm, including ISRC codes and genre tags
-   **Audio transcoding** - Stream at original quality or transcode on-the-fly (320kbps, 192kbps, or 128kbps)
-   **Lyrics** - Displays embedded lyrics or fetches them automatically from LRCLIB. Timed lyrics get line-by-line sync during playback; untimed lyrics display as static text. Coverage is good for major artists but varies for niche or independent music\*
-   **Ultra-wide support** - Library grid scales up to 8 columns on large displays

<p align="center">
  <img src="assets/screenshots/desktop-library.png" alt="Library View" width="800">
</p>

### Discovery and Playlists

-   **Made For You mixes** - Programmatically generated playlists based on your library:
    -   Era mixes (Your 90s, Your 2000s, etc.)
    -   Genre mixes
    -   Top tracks
    -   Rediscover forgotten favorites
    -   Similar artist recommendations
-   **Library Radio Stations** - One-click radio modes for instant listening:
    -   Shuffle All (your entire library)
    -   Workout (high energy tracks)
    -   Discovery (lesser-played gems)
    -   Favorites (most played)
    -   Dynamic genre and decade stations generated from your library
-   **Discover Weekly** - Weekly playlists of new music tailored to your listening habits (requires Lidarr)
-   **Artist recommendations** - Find similar artists based on what you already love
-   **Artist name resolution** - Smart alias lookup via Last.fm (e.g., "of mice" → "Of Mice & Men")
-   **Discography sorting** - Sort artist albums by year or date added
-   **Deezer previews** - Preview tracks you don't own before adding them to your library
-   **Vibe matching** - Find tracks that match your current mood (see [The Vibe System](#the-vibe-system))

### Podcasts

-   **Subscribe via RSS** - Search iTunes for podcasts and subscribe directly
-   **Track progress** - Pick up where you left off across devices
-   **Episode management** - Browse episodes, mark as played, and manage your subscriptions
-   **Mobile skip buttons** - Jump ±30 seconds on mobile for easy navigation

<p align="center">
  <img src="assets/screenshots/desktop-podcasts.png" alt="Podcasts" width="800">
</p>

### Audiobooks

-   **Audiobookshelf integration** - Connect your existing Audiobookshelf instance
-   **Unified experience** - Browse and listen to audiobooks alongside your music
-   **Progress sync** - Your listening position syncs with Audiobookshelf
-   **Mobile skip buttons** - Jump ±30 seconds on mobile for easy chapter navigation

<p align="center">
  <img src="assets/screenshots/desktop-audiobooks.png" alt="Audiobooks" width="800">
</p>

### The Vibe System

The centerpiece of music discovery in Kima. Your entire library is analyzed by a CLAP neural network and projected into a 2D/3D space where similar-sounding tracks cluster together. The result is a living map of your music collection you can explore, search, and navigate.

**Music Map** -- the default 2D view. Every track in your library is a point on the map, colored by mood cluster. Zoom and pan to explore. Click any track to inspect it; double-click to play it immediately.

<p align="center">
  <img src="assets/screenshots/vibe-map.png" alt="Vibe Music Map" width="800">
</p>

**Galaxy View** -- the same data rendered as a 3D star field. Orbit, zoom, and fly through your library. Switch between Map and Galaxy with the toggle in the top-left corner.

<p align="center">
  <img src="assets/screenshots/vibe-galaxy.png" alt="Vibe Galaxy" width="800">
</p>

**Drift** -- pick any two tracks as start and end points and Kima plots a smooth path through the audio space between them. The resulting queue travels gradually from one sonic neighborhood to the other.

<p align="center">
  <img src="assets/screenshots/vibe-drift.png" alt="Vibe Drift -- Song Path" width="800">
</p>

**Blend** -- add multiple tracks and let Kima find the centroid in audio space. The result is a queue of tracks that blend all of the inputs together into something new.

<p align="center">
  <img src="assets/screenshots/vibe-blend.png" alt="Vibe Blend" width="800">
</p>

**Additional features:**

-   **Text search** - Type any descriptor ("loud and fast", "rainy day piano") to highlight matching tracks on the map
-   **Right-click context menu** - Vibe from any track (similar-track queue), find similar (highlight on map), or start a Drift
-   **Labels** - Toggle track/artist labels on the map
-   **Keep The Vibe Going** - From the player, activate vibe mode to continuously queue tracks that match what's playing

**Mood Mixer** -- pick a mood preset (Happy, Energetic, Chill, Focus, Party, Acoustic, Melancholy, Sad, Aggressive) to instantly generate a playlist calibrated to that sound. Moods are derived from audio analysis of your actual library, not genre tags.

<p align="center">
  <img src="assets/screenshots/mood-mixer.png" alt="Mood Mixer" width="800">
</p>

### Playlist Import

Import playlists from Spotify, Deezer, and YouTube, or browse and discover new music directly.

-   **Spotify Import** - Paste any Spotify playlist URL to import tracks
-   **Deezer Import** - Same functionality for Deezer playlists
-   **YouTube Import** - Import from YouTube and YouTube Music playlists
-   **ISRC Matching** - Deterministic track matching via International Standard Recording Codes before falling back to fuzzy text matching
-   **Smart Preview** - See which tracks are already in your library, which albums can be downloaded, and which have no matches
-   **Selective Download** - Choose exactly which albums to add to your library
-   **Browse Deezer** - Explore Deezer's featured playlists and radio stations directly in-app

<p align="center">
  <img src="assets/screenshots/deezer-browse.png" alt="Browse Deezer" width="800">
</p>
<p align="center">
  <img src="assets/screenshots/spotify-import-preview.png" alt="Import Preview" width="800">
</p>

### Native Apps

-   **OpenSubsonic API** - Use any Subsonic-compatible client (Symfonium, DSub, Ultrasonic, etc.) to stream your Kima library
-   **Standard Subsonic auth** - MD5 token auth supported; enter your API token as the password -- works with Amperfy, Symfonium, DSub, and any standard Subsonic client
-   **Per-client tokens** - Generate named API tokens in Settings > Native Apps; revoke them individually when a device is lost or replaced
-   **Enrichment-aware** - Genres and artist biographies exposed to clients come from Last.fm enrichment, not just file tags
-   **Lyrics, bookmarks, and play queue** - getLyrics, bookmarks, and savePlayQueue/getPlayQueue for cross-device resume

### Multi-User Support

-   **Separate accounts** - Each user gets their own playlists, listening history, and preferences
-   **Admin controls** - Manage users and system settings from the web interface
-   **Two-factor authentication** - Secure accounts with TOTP-based 2FA

### Custom Playlists

-   **Create and curate** - Build your own playlists from your library
-   **Share with others** - Make playlists public for other users on your instance
-   **Save mixes** - Convert any auto-generated mix into a permanent playlist

### Mobile and TV

-   **Progressive Web App (PWA)** - Install Kima on your phone or tablet for a native-like experience
-   **Android TV** - Fully optimized 10-foot interface with D-pad/remote navigation
-   **Responsive Web** - Works on any device with a modern browser

<p align="center">
  <img src="assets/screenshots/mobile-home.png" alt="Mobile Home" width="280">
  <img src="assets/screenshots/mobile-player.png" alt="Mobile Player" width="280">
  <img src="assets/screenshots/mobile-library.png" alt="Mobile Library" width="280">
</p>

---

## Mobile Support

### Progressive Web App (PWA)

Kima works as a PWA on mobile devices, giving you a native app-like experience without needing to download from an app store.

**To install on Android:**

1. Open your Kima server in Chrome
2. Tap the menu (⋮)
3. Select "Add to Home Screen" or "Install app"

**To install on iOS:**

1. Open your Kima server in Safari
2. Tap the Share button
3. Select "Add to Home Screen"

**PWA Features:**

-   Full streaming functionality
-   Background audio playback
-   Lock screen and notification media controls (iOS Control Center and Android notifications)
-   Offline caching for faster loads
-   Installable icon on home screen

### Android TV

Kima includes a dedicated interface optimized for television displays:

-   Large artwork and readable text from across the room
-   Full D-pad and remote navigation support
-   Persistent Now Playing bar for quick access to playback controls
-   Simplified navigation focused on browsing and playback

The TV interface is automatically enabled when accessing Kima from an Android TV device's browser.

---

## Quick Start

### One Command Install

```bash
docker run -d \
  --name kima \
  -p 3030:3030 \
  -v /path/to/your/music:/music \
  -v kima_data:/data \
  chevron7locked/kima:latest
```

That's it! Open http://localhost:3030 and create your account.

**With GPU acceleration** (requires [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)):

```bash
docker run -d \
  --name kima \
  --gpus all \
  -p 3030:3030 \
  -v /path/to/your/music:/music \
  -v kima_data:/data \
  chevron7locked/kima:latest
```

### What's Included

The Kima container includes everything you need:

-   **Web Interface** (port 3030)
-   **API Server** (internal)
-   **PostgreSQL Database** (internal)
-   **Redis Cache** (internal)

### Configuration Options

```bash
docker run -d \
  --name kima \
  -p 3030:3030 \
  -v /path/to/your/music:/music \
  -v kima_data:/data \
  -e SESSION_SECRET=your-secret-key \
  -e TZ=America/New_York \
  --add-host=host.docker.internal:host-gateway \
  chevron7locked/kima:latest
```

| Variable         | Description            | Default        |
| ---------------- | ---------------------- | -------------- |
| `SESSION_SECRET` | Session encryption key | Auto-generated |
| `TZ`             | Timezone               | UTC            |

### Using Docker Compose

Create a `docker-compose.yml` file:

```yaml
services:
    kima:
        image: chevron7locked/kima:latest
        container_name: kima
        ports:
            - "3030:3030"
        volumes:
            - /path/to/your/music:/music
            - kima_data:/data
        environment:
            - TZ=America/New_York
        # Required for Lidarr webhook integration on Linux
        extra_hosts:
            - "host.docker.internal:host-gateway"
        restart: unless-stopped

volumes:
    kima_data:
```

Then run:

```bash
docker compose up -d
```

**Updating with Docker Compose:**

```bash
docker compose pull
docker compose up -d
```

### Bind-mounting `/data` on Linux

Named volumes are recommended. If you bind-mount `/data`, make sure required subdirectories exist and are writable by the container service users.

```bash
mkdir -p /path/to/kima-data/postgres /path/to/kima-data/redis
```

If startup logs report a permission error, `chown` the host path to the UID/GID shown in the logs (for example, the postgres user).

---

Kima will begin scanning your music library automatically. Depending on the size of your collection, this may take a few minutes to several hours.

---

## Release Channels

Kima offers two release channels to match your stability preferences:

### 🟢 Stable (Recommended)

Production-ready releases. Updated when new stable versions are released.

```bash
docker pull chevron7locked/kima:latest
# or specific version
docker pull chevron7locked/kima:v1.7.3
```

### 🔴 Nightly (Development)

Latest development build. Built on every push to main.

⚠️ **Not recommended for production** - may be unstable or broken.

```bash
docker pull chevron7locked/kima:nightly
```

**For contributors:** See [`CONTRIBUTING.md`](CONTRIBUTING.md) for information on submitting pull requests and contributing to Kima.

---

## Configuration

### Environment Variables

The unified Kima container handles most configuration automatically. Here are the available options:

| Variable                            | Default                            | Description                                                                 |
| ----------------------------------- | ---------------------------------- | --------------------------------------------------------------------------- |
| `SESSION_SECRET`                    | Auto-generated                     | Session encryption key (recommended to set for persistence across restarts) |
| `SETTINGS_ENCRYPTION_KEY`           | Required                           | Encryption key for stored credentials (generate with `openssl rand -base64 32`) |
| `TZ`                                | `UTC`                              | Timezone for the container                                                  |
| `PORT`                              | `3030`                             | Port to access Kima                                                       |
| `KIMA_CALLBACK_URL`               | `http://host.docker.internal:3030` | URL for Lidarr webhook callbacks (see [Lidarr integration](#lidarr))        |
| `AUDIO_ANALYSIS_WORKERS`            | `2`                                | Number of parallel workers for audio analysis (1-8)                         |
| `AUDIO_ANALYSIS_THREADS_PER_WORKER` | `1`                                | Threads per worker for TensorFlow/FFT operations (1-4)                      |
| `AUDIO_ANALYSIS_BATCH_SIZE`         | `10`                               | Tracks per analysis batch                                                   |
| `AUDIO_BRPOP_TIMEOUT`              | `30`                               | Redis blocking wait timeout in seconds (also controls DB reconciliation)     |
| `AUDIO_MODEL_IDLE_TIMEOUT`         | `300`                              | Seconds before unloading idle ML models to free memory (0 = never unload)    |
| `LOG_LEVEL`                         | `warn` (prod) / `debug` (dev)      | Logging verbosity: debug, info, warn, error, silent                         |
| `DOCS_PUBLIC`                       | `false`                            | Set to `true` to allow public access to API docs in production              |

The music library path is configured via Docker volume mount (`-v /path/to/music:/music`).

#### External Access

If you're accessing Kima from outside your local network (via reverse proxy, for example), set the API URL:

```env
NEXT_PUBLIC_API_URL=https://kima-api.yourdomain.com
```

And add your domain to the allowed origins:

```env
ALLOWED_ORIGINS=http://localhost:3030,https://kima.yourdomain.com
```

---

## Security Considerations

### Environment Variables

Kima uses several sensitive environment variables. Never commit your `.env` file.

| Variable                  | Purpose                        | Required          |
| ------------------------- | ------------------------------ | ----------------- |
| `SESSION_SECRET`          | Session encryption (32+ chars) | Yes               |
| `SETTINGS_ENCRYPTION_KEY` | Encrypts stored credentials    | Yes               |
| `SOULSEEK_USERNAME`       | Soulseek login                 | If using Soulseek |
| `SOULSEEK_PASSWORD`       | Soulseek password              | If using Soulseek |
| `LIDARR_API_KEY`          | Lidarr integration             | If using Lidarr   |
| `OPENAI_API_KEY`          | AI features                    | Optional          |
| `LASTFM_API_KEY`          | Artist recommendations         | Optional          |
| `FANART_API_KEY`          | Artist images                  | Optional          |

### Authentication & Session Security

-   **JWT tokens** - Access tokens expire after 24 hours; refresh tokens after 30 days
-   **Token refresh** - Automatic token refresh via `/api/auth/refresh` endpoint
-   **Password changes** - Changing your password invalidates all existing sessions
-   **Session cookies** - Secured with `httpOnly`, `sameSite=strict`, and `secure` (in production)
-   **Encryption validation** - Encryption key is validated on startup to prevent insecure defaults

### Webhook Security

-   **Lidarr webhooks** - Support signature verification with configurable secret
-   Configure the webhook secret in Settings → Lidarr for additional security

### Admin Dashboard Security

-   **Bull Board** - Job queue dashboard at `/admin/queues` requires authenticated admin user
-   **API Documentation** - Swagger docs at `/api-docs` require authentication in production (unless `DOCS_PUBLIC=true`)

### VPN Configuration (Optional)

If using Mullvad VPN for Soulseek:

-   Place WireGuard config in `backend/mullvad/` (gitignored)
-   Never commit VPN credentials or private keys
-   The `*.conf` and `key.txt` patterns are already in .gitignore

### Generating Secrets

```bash
# Generate a secure session secret
openssl rand -base64 32

# Generate encryption key
openssl rand -base64 32
```

### Network Security

-   Kima is designed for self-hosted LAN use
-   For external access, use a reverse proxy with HTTPS
-   Configure `ALLOWED_ORIGINS` for your domain

---

## CLAP Audio Analysis

The CLAP (Contrastive Language-Audio Pretraining) service generates embeddings for audio similarity search, powering the Vibe button's track matching feature.

### Requirements

-   PostgreSQL with pgvector extension (included in `pgvector/pgvector:pg16` image)
-   2-4GB RAM per worker
-   CLAP model downloads automatically on first build (~700MB)

### Configuration

Environment variables in docker-compose.yml:

| Variable                  | Default | Description                                |
| ------------------------- | ------- | ------------------------------------------ |
| `CLAP_WORKERS`            | `2`     | Number of analysis workers (1-8)           |
| `CLAP_THREADS_PER_WORKER` | `1`     | CPU threads per worker (1-4)               |
| `CLAP_SLEEP_INTERVAL`     | `5`     | Queue poll interval in seconds             |

### Usage

The CLAP analyzer runs automatically alongside the main audio analyzer. The vibe button uses CLAP embeddings for finding similar tracks. Text-based vibe search is available at `/api/vibe/search`.

### API Endpoints

| Endpoint                       | Method | Description                                |
| ------------------------------ | ------ | ------------------------------------------ |
| `/api/vibe/similar/:trackId`   | GET    | Get tracks similar to the given track      |
| `/api/vibe/search`             | POST   | Search tracks by text description          |
| `/api/vibe/status`             | GET    | Get embedding progress                     |

---

## GPU Acceleration

GPU acceleration speeds up audio analysis (mood detection, BPM extraction, vibe embeddings). It is **optional** -- everything works on CPU, just slower.

### Requirements

-   NVIDIA GPU with CUDA support
-   NVIDIA drivers installed on the host (`nvidia-smi` should work)
-   [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) -- bridges Docker to your GPU

### Install NVIDIA Container Toolkit

The toolkit is required for any Docker container to access the GPU. Install it once:

**Fedora / Nobara / RHEL:**
```bash
curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo && sudo dnf install -y nvidia-container-toolkit && sudo nvidia-ctk runtime configure --runtime=docker && sudo systemctl restart docker
```

**Ubuntu / Debian:**
```bash
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list && sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit && sudo nvidia-ctk runtime configure --runtime=docker && sudo systemctl restart docker
```

### Verify Host Setup

```bash
# Check NVIDIA driver
nvidia-smi

# Check container toolkit
nvidia-container-runtime --version
```

### Enable GPU

**All-in-One container:**
```bash
docker run -d --gpus all -p 3030:3030 -v /path/to/music:/music -v kima_data:/data chevron7locked/kima:latest
```

**Docker Compose:**

Uncomment the `devices` block under `audio-analyzer` (and optionally `audio-analyzer-clap`) in `docker-compose.yml`:

```yaml
reservations:
    memory: 2G
    devices:
        - driver: nvidia
          count: 1
          capabilities: [gpu]
```

Then restart: `docker compose up -d`

### Verify GPU Detection

```bash
# MusiCNN analyzer
docker logs kima_audio_analyzer 2>&1 | grep -i gpu

# CLAP analyzer
docker logs kima_audio_analyzer_clap 2>&1 | grep -i gpu
```

Expected: `TensorFlow GPU detected: ...` or `CUDA available: True`

If you see `TensorFlow running on CPU`, GPU passthrough is not active.

---

## Integrations

Kima works beautifully on its own, but it becomes even more powerful when connected to other services.

### Lidarr

Connect Kima to your Lidarr instance to request and download new music directly from the app.

**What you get:**

-   Browse artists and albums you don't own
-   Request downloads with a single click
-   Discover Weekly playlists that automatically download new recommendations
-   Automatic library sync when Lidarr finishes importing

**Setup:**

1. Go to Settings in Kima
2. Navigate to the Lidarr section
3. Enter your Lidarr URL (e.g., `http://localhost:8686`)
4. Enter your Lidarr API key (found in Lidarr under Settings > General)
5. Test the connection and save

Kima will automatically configure a webhook in Lidarr to receive notifications when new music is imported.

**Networking Note:**

The webhook requires Lidarr to be able to reach Kima. By default, Kima uses `host.docker.internal:3030` which works automatically when using the provided docker-compose files (they include `extra_hosts` to enable this on Linux).

If you're using **custom Docker networks** with static IPs, set the callback URL so Lidarr knows how to reach Kima:

```yaml
environment:
    - KIMA_CALLBACK_URL=http://YOUR_KIMA_IP:3030
```

Use the IP address that Lidarr can reach. If both containers are on the same Docker network, use Kima's container IP.

### Audiobookshelf

Connect to your Audiobookshelf instance to browse and listen to audiobooks within Kima.

**What you get:**

-   Browse your audiobook library
-   Stream audiobooks directly in Kima
-   Progress syncs between Kima and Audiobookshelf

**Setup:**

1. Go to Settings in Kima
2. Navigate to the Audiobookshelf section
3. Enter your Audiobookshelf URL (e.g., `http://localhost:13378`)
4. Enter your API key (found in Audiobookshelf under Settings > Users > your user > API Token)
5. Test the connection and save

### Soulseek

Kima includes built-in Soulseek support for finding rare tracks and one-offs that aren't available through traditional download sources like Lidarr.

[Soulseek](https://www.slsknet.org/) is a peer-to-peer file sharing network focused on music. Users share their music libraries and can browse/download from each other. Kima connects directly to the Soulseek network -- no additional software (like slskd) is required.

**Setup:**

1. Go to Settings in Kima
2. Navigate to the Soulseek section
3. Enter your Soulseek username and password (create an account at [slsknet.org](https://www.slsknet.org/) if you don't have one)
4. Save your settings

**How Search Works:**

When you search for music in Kima's Discovery tab, Soulseek results appear alongside Last.fm and Deezer results. Each result shows the filename, file size, bitrate, and format (FLAC/MP3). Metadata like artist and album is parsed from the file path structure (typically `Artist/Album/01 - Track.flac`).

**How Download Works:**

1. Click the download button on a Soulseek search result
2. Kima searches the Soulseek network for the best match (preferring FLAC, high bitrate)
3. The file is downloaded directly to your music library path
4. A library scan is triggered to import the new file
5. Metadata enrichment runs automatically (artist info, mood tags, audio analysis)

You can also configure Soulseek as a download source for playlist imports. In Settings > Downloads, set Soulseek as primary or fallback source. When importing a Spotify/Deezer playlist, tracks not found in your library will be searched and downloaded from Soulseek automatically.

**Download progress** is visible in the Activity Panel (bell icon in the top bar).

**Limitations:**

- Download speed depends on the sharing user's connection and availability
- Not all tracks will have results -- Soulseek coverage varies by genre and popularity
- Some users may have slow connections or go offline during transfers
- Kima retries with alternative users if a download fails or times out

### Native Apps (Subsonic)

Kima implements the [OpenSubsonic](https://opensubsonic.netlify.app/) REST API, making it compatible with any Subsonic client.

**Tested clients:** Amperfy (iOS), Symfonium, DSub, Ultrasonic

**Setup:**

1. Go to Settings > Native Apps in Kima
2. Enter a client name (e.g. "Amperfy on iPhone") and click **Generate Token**
3. Copy and save the token -- it is only shown once
4. In your client app, configure:
   - **Server URL** -- your Kima server address (e.g. `http://192.168.1.10:3030`)
   - **Username** -- your Kima username
   - **Password** -- the token you just generated

**Notes:**

- Standard MD5 token auth is supported -- clients that hash their password automatically will work correctly when you enter an API token as the password
- Each client should have its own token so you can revoke access per device
- Genres and biographies surfaced to clients come from Last.fm enrichment, not just file tags
- DISCOVER-location albums are excluded from all library views
- OpenSubsonic extensions exposed: `apiKeyAuthentication`, `songLyrics`, `indexBasedQueue`, and `getPodcastEpisode`
- Additional OpenSubsonic endpoints supported: `tokenInfo`, `startScan`, `getScanStatus`, `search`, `search2`, `search3`, `getUser`, `getUsers`, `createUser`, `updateUser`, `deleteUser`, `changePassword`, `getPlaylists`, `getPlaylist`, `createPlaylist`, `updatePlaylist`, `deletePlaylist`, `setRating`, `getPlayQueue`, `getPlayQueueByIndex`, `savePlayQueue`, `savePlayQueueByIndex`, `getBookmarks`, `createBookmark`, `deleteBookmark`, `getInternetRadioStations`, `createInternetRadioStation`, `updateInternetRadioStation`, `deleteInternetRadioStation`, `getAvatar`, `getShares`, `createShare`, `updateShare`, `deleteShare`, `getChatMessages`, `addChatMessage`, `getVideos`, `getVideoInfo`, `getCaptions`, `jukeboxControl`, `getTranscodeDecision`, `getTranscodeStream`, `hls`, `getLyricsBySongId`, `getLyrics`, `getNowPlaying`, `getTopSongs`, `getSongsByGenre`, `getSimilarSongs`, `getSimilarSongs2`, `getMusicDirectory`, `getPodcasts`, `getNewestPodcasts`, `getPodcastEpisode`, `refreshPodcasts`

**Subsonic route module layout (backend):**

- `backend/src/routes/subsonic/index.ts` -- top-level router composition, auth/rate-limit, system endpoints
- `library.ts` -- artists/albums/tracks browsing and directory traversal
- `search.ts` -- `search`/`search2`/`search3`, genre/top/similar discovery
- `playback.ts` -- stream/download/cover-art/scrobble/now-playing plus `hls`/`getTranscodeStream`
- `playlists.ts` -- playlist list/read/create/update/delete
- `queue.ts` -- play queue get/save (ID-based and index-based)
- `starred.ts` -- star/unstar, starred lists, `setRating`
- `artistInfo.ts` / `lyrics.ts` -- artist metadata and lyric endpoints
- `userManagement.ts` / `profile.ts` -- user admin endpoints and `getUser`
- `podcasts.ts` -- podcast subscription and episode endpoints
- `compat.ts` -- compatibility/stub endpoints for clients that expect optional APIs

**When adding a Subsonic endpoint:**

1. Add the handler in the module that matches endpoint ownership (or create a new focused module if needed).
2. Validate required query params and return Subsonic-compatible errors via `subsonicError`.
3. Return response payloads through `subsonicOk` using existing mapper helpers where possible.
4. Register the router in `backend/src/routes/subsonic/index.ts` (preserve catch-all behavior).
5. Update the endpoint support list in this README and run diagnostics on touched Subsonic route files.

---

## Using Kima

### First-Time Setup

When you first access Kima, you'll be guided through a setup wizard:

1. **Create your account** - The first user becomes the administrator
2. **Configure integrations** - Optionally connect Lidarr, Audiobookshelf, and other services
3. **Wait for library scan** - Kima will scan and catalog your music collection

### The Home Screen

After setup, your home screen displays:

-   **Continue Listening** - Pick up where you left off
-   **Recently Added** - New additions to your library
-   **Library Radio Stations** - One-click radio modes (Shuffle All, Workout, Discovery, Favorites, plus genre and decade stations)
-   **Made For You** - Auto-generated mixes based on your library
-   **Recommended For You** - Artist recommendations from Last.fm
-   **Popular Podcasts** - Trending podcasts you might enjoy
-   **Audiobooks** - Quick access to your audiobook library (if Audiobookshelf is connected)

### Searching

Kima offers two search modes:

**Library Search** - Find artists, albums, and tracks in your collection. Results appear instantly as you type.

**Discovery Search** - Find new music and podcasts you don't own. Powered by Last.fm for music and iTunes for podcasts. From discovery results, you can:

-   Preview tracks via Deezer
-   Request downloads through Lidarr
-   Subscribe to podcasts

<p align="center">
  <img src="assets/screenshots/desktop-artist.png" alt="Artist Page" width="800">
</p>
<p align="center">
  <img src="assets/screenshots/desktop-album.png" alt="Album Page" width="800">
</p>

### Managing Podcasts

1. Use the search bar and select "Podcasts" to find shows
2. Click on a podcast to see its details and recent episodes
3. Click Subscribe to add it to your library
4. Episodes stream directly from the RSS feed - no downloads required

Your listening progress is saved automatically, so you can pause on one device and resume on another.

### Creating Playlists

1. Navigate to your Library and select the Playlists tab
2. Click "New Playlist" and give it a name
3. Add tracks by clicking the menu on any song and selecting "Add to Playlist"
4. Reorder tracks by dragging and dropping
5. Toggle "Public" to share with other users on your instance

### Using the Vibe System

**Exploring the map:**

1. Navigate to **Vibe** in the sidebar
2. Your library loads as a 2D music map -- similar-sounding tracks cluster together
3. Click any point to inspect the track; double-click to play it
4. Switch to **Galaxy** view for a 3D star-field perspective
5. Use the **Search** bar to highlight tracks matching a text description

**Drift -- journey between two tracks:**

1. Click **Drift** in the toolbar
2. Search for and select a start track, then an end track
3. Click **Generate Path** -- Kima queues a smooth sonic journey between them

**Blend -- find the space between multiple tracks:**

1. Click **Blend** in the toolbar
2. Add tracks you want to blend together
3. Kima finds the centroid in audio space and queues tracks from that neighborhood

**Keep The Vibe Going (from the player):**

1. Start playing any track
2. Right-click it on the vibe map and select **Vibe** to queue similar tracks continuously

**Mood Mixer:**

1. On the home screen, click **Mood Mixer** next to the Made For You section
2. Select a mood preset -- Kima instantly generates a playlist from your library calibrated to that mood
3. Each preset count shows how many tracks in your library match that mood

### Importing Playlists

**From Spotify:**

1. Copy a Spotify playlist URL
2. Go to Import (in the sidebar)
3. Paste the URL and click Preview
4. Review the results - you'll see which tracks are in your library, which can be downloaded, and which aren't available
5. Select albums to download and start the import

**From Deezer:**

1. Browse featured playlists directly in the Browse section, or paste a Deezer playlist URL
2. The same preview and import flow applies
3. Explore Deezer's curated playlists and radio stations for discovery

**From YouTube:**

1. Copy a YouTube or YouTube Music playlist URL
2. Paste it in the import field on the Playlists page
3. Kima extracts individual tracks and resolves them via song.link to identify each one
4. The same preview and import flow applies

### Playback Settings

In Settings, you can configure:

-   **Playback Quality** - Choose between Original, High (320kbps), Medium (192kbps), or Low (128kbps)
-   **Cache Size** - Limit how much space transcoded files use

<p align="center">
  <img src="assets/screenshots/desktop-player.png" alt="Now Playing" width="800">
</p>
<p align="center">
  <img src="assets/screenshots/desktop-settings.png" alt="Settings" width="800">
</p>

### Keyboard Shortcuts

When using the web interface, these keyboard shortcuts are available during playback:

| Key         | Action                   |
| ----------- | ------------------------ |
| Space       | Play / Pause             |
| N           | Next track               |
| P           | Previous track           |
| S           | Toggle shuffle           |
| M           | Toggle mute              |
| Arrow Up    | Volume up                |
| Arrow Down  | Volume down              |
| Arrow Right | Seek forward 10 seconds  |
| Arrow Left  | Seek backward 10 seconds |

### Android TV

Kima includes a dedicated interface optimized for television displays:

-   Large artwork and readable text from across the room
-   Full D-pad and remote navigation support
-   Persistent Now Playing bar for quick access to playback controls
-   Simplified navigation focused on browsing and playback

The TV interface is automatically enabled when accessing Kima from an Android TV device. Access it through your TV's web browser.

---

## Administration

### Managing Users

As an administrator, you can:

1. Go to Settings > User Management
2. Create new user accounts
3. Delete existing users (except yourself)
4. Users can be assigned "admin" or "user" roles

### System Settings

Administrators have access to additional settings:

-   **Lidarr/Audiobookshelf/Soulseek** - Configure integrations
-   **Storage Paths** - View configured paths
-   **Cache Management** - Clear caches if needed
-   **Advanced** - Download retry settings, concurrent download limits

### Download Settings

Configure how Kima acquires new music in Settings → Downloads:

-   **Primary Source** - Choose between Soulseek or Lidarr as your main download source
-   **Fallback Behavior** - Optionally fall back to the other source if the primary fails
-   **Stale Job Cleanup** - Clear stuck Discovery batches and downloads that aren't progressing

### Enrichment Settings

Control metadata enrichment in Settings → Cache & Automation:

-   **Enrichment Speed** - Adjust concurrency (1-5x) to balance speed vs. system load
-   **Failure Notifications** - Get notified when enrichment fails for specific items
-   **Retry/Skip Modal** - Choose to retry failed items or skip them to continue processing

### Activity Panel

The Activity Panel provides real-time visibility into downloads and system events:

-   **Notifications** - Alerts for completed downloads, ready playlists, and import completions
-   **Active Downloads** - Monitor download progress in real-time
-   **History** - View completed downloads and past events

Access the Activity Panel by clicking the bell icon in the top bar (desktop) or through the menu (mobile).

### API Keys

For programmatic access to Kima:

1. Go to Settings > API Keys
2. Generate a new key with a descriptive name
3. Use the key in the `Authorization` header: `Bearer YOUR_API_KEY`

API documentation is available at `/api-docs` when the backend is running (requires authentication in production).

### Bull Board Dashboard

Monitor background job queues at `/admin/queues`:

-   View active, waiting, completed, and failed jobs
-   Retry or remove stuck jobs
-   Monitor download progress and enrichment tasks
-   Requires admin authentication

---

## Architecture

Kima consists of several components working together:

```
                                    ┌─────────────────┐
                                    │   Your Browser  │
                                    └────────┬────────┘
                                             │
                                             ▼
┌─────────────────┐              ┌─────────────────────┐
│  Music Library  │◄────────────►│     Frontend        │
│   (Your Files)  │              │   (Next.js :3030)   │
└─────────────────┘              └──────────┬──────────┘
                                            │
                                            ▼
┌─────────────────┐              ┌─────────────────────┐
│    Lidarr       │◄────────────►│      Backend        │
│   (Optional)    │              │  (Express.js :3006) │
└─────────────────┘              └──────────┬──────────┘
                                            │
┌─────────────────┐              ┌──────────┴──────────┐
│ Audiobookshelf  │◄────────────►│                     │
│   (Optional)    │              │  ┌───────────────┐  │
└─────────────────┘              │  │  PostgreSQL   │  │
                                 │  └───────────────┘  │
                                 │  ┌───────────────┐  │
                                 │  │     Redis     │  │
                                 │  └───────────────┘  │
                                 └─────────────────────┘
```

| Component           | Purpose                                    | Default Port |
| ------------------- | ------------------------------------------ | ------------ |
| Frontend            | Web interface (Next.js)                    | 3030         |
| Backend             | API server (Express.js)                    | 3006         |
| PostgreSQL          | Database (with pgvector)                   | 5432         |
| Redis               | Caching and job queues                     | 6379         |
| Audio Analyzer      | Mood, BPM, key detection (Essentia MusiCNN)| --           |
| Audio Analyzer CLAP | Vibe similarity embeddings (LAION CLAP)    | --           |

---

## Roadmap

Kima is under active development. Here's what's planned:

-   **Native Mobile App** - React Native application for iOS and Android
-   **Offline Mode** - Download tracks for offline playback
-   **Windows Executable** - Standalone app for Windows users who prefer not to use Docker

Contributions and suggestions are welcome.

---

## License

Kima is released under the [GNU General Public License v3.0](LICENSE).

You are free to use, modify, and distribute this software under the terms of the GPL-3.0 license.

---

## Acknowledgments

Kima wouldn't be possible without these services and projects:

-   [Last.fm](https://www.last.fm/) - Artist recommendations and music metadata
-   [MusicBrainz](https://musicbrainz.org/) - Comprehensive music database
-   [iTunes Search API](https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/iTuneSearchAPI/) - Podcast discovery
-   [Deezer](https://developers.deezer.com/) - Track previews and playlist browsing
-   [Odesli/song.link](https://odesli.co/) - Cross-platform music link resolution
-   [Fanart.tv](https://fanart.tv/) - Artist images and artwork
-   [Lidarr](https://lidarr.audio/) - Music collection management
-   [Audiobookshelf](https://www.audiobookshelf.org/) - Audiobook and podcast server

---

## Support

If you encounter issues or have questions:

1. Check the [Issues](https://github.com/Chevron7Locked/kima-hub/issues) page for known problems
2. Open a new issue with details about your setup and the problem you're experiencing
3. Include logs from `docker compose logs` if relevant

---

_Built with love for the self-hosted community._


## /assets/screenshots/deezer-browse.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/deezer-browse.png

## /assets/screenshots/desktop-album.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-album.png

## /assets/screenshots/desktop-artist.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-artist.png

## /assets/screenshots/desktop-audiobooks.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-audiobooks.png

## /assets/screenshots/desktop-home.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-home.png

## /assets/screenshots/desktop-library.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-library.png

## /assets/screenshots/desktop-player.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-player.png

## /assets/screenshots/desktop-podcasts.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-podcasts.png

## /assets/screenshots/desktop-settings.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/desktop-settings.png

## /assets/screenshots/mobile-album.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-album.png

## /assets/screenshots/mobile-artist.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-artist.png

## /assets/screenshots/mobile-audiobooks.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-audiobooks.png

## /assets/screenshots/mobile-home.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-home.png

## /assets/screenshots/mobile-library.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-library.png

## /assets/screenshots/mobile-login.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-login.png

## /assets/screenshots/mobile-player.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-player.png

## /assets/screenshots/mobile-podcasts.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mobile-podcasts.png

## /assets/screenshots/mood-mixer.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/mood-mixer.png

## /assets/screenshots/reddit post/desktop-home.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/reddit post/desktop-home.png

## /assets/screenshots/reddit post/desktop-podcasts.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/reddit post/desktop-podcasts.png

## /assets/screenshots/reddit post/mobile-home.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/reddit post/mobile-home.png

## /assets/screenshots/reddit post/mobile-player.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/reddit post/mobile-player.png

## /assets/screenshots/reddit post/vibe-overlay.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/reddit post/vibe-overlay.png

## /assets/screenshots/spotify-import-preview.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/spotify-import-preview.png

## /assets/screenshots/vibe-blend.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/vibe-blend.png

## /assets/screenshots/vibe-drift.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/vibe-drift.png

## /assets/screenshots/vibe-galaxy.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/vibe-galaxy.png

## /assets/screenshots/vibe-map.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/vibe-map.png

## /assets/screenshots/vibe-overlay.png

Binary file available at https://raw.githubusercontent.com/Chevron7Locked/lidify/refs/heads/main/assets/screenshots/vibe-overlay.png

## /backend/.dockerignore

```dockerignore path="/backend/.dockerignore" 
# Dependencies
node_modules
npm-debug.log
yarn-error.log

# Build output
dist
build
*.tsbuildinfo

# Environment files
.env
.env.local
.env.*.local

# Testing
coverage
*.test.ts
**/__tests__

# Development
.vscode
.idea
*.swp
*.swo
*~

# Cache and logs
cache
logs
*.log

# Git
.git
.gitignore

# Documentation
*.md
docs

# Misc
.DS_Store
Thumbs.db

```

## /backend/.gitignore

```gitignore path="/backend/.gitignore" 
node_modules/
dist/
.env
.env.*
.DS_Store
logs/
*.log

# Runtime caches (safe to delete; regenerated)
cache/

# VPN configs for local testing (do not commit)
mullvad/

# Stray media artifacts (should never be committed)
*.mp3
*.flac
*.wav
*.m4a
*.ogg
*.opus

```

## /backend/Dockerfile

``` path="/backend/Dockerfile" 
# Stage 1: Dependencies (all deps for tsx runtime)
FROM node:20-slim AS deps

WORKDIR /app

# Copy package files
# prisma/ is needed before `npm ci` because @prisma/client's postinstall
# hook reads the schema during dependency installation.
COPY package*.json ./
COPY prisma ./prisma/

# Install ALL dependencies (tsx needs dev dependencies)
RUN npm ci && \
    npm cache clean --force

# Generate Prisma Client
RUN npx prisma generate

# Stage 2: Production runtime (Hardened)
FROM node:20-slim

WORKDIR /app

# Install runtime dependencies first
# ffmpeg is required for audio transcoding
# openssl is required for Prisma
# tini is used as PID 1 below for proper signal forwarding/zombie reaping
RUN apt-get update && apt-get install -y --no-install-recommends \
    ffmpeg \
    tini \
    openssl \
    && rm -rf /var/lib/apt/lists/*

# Copy all node_modules (including tsx)
COPY --from=deps /app/node_modules ./node_modules
COPY --from=deps /app/package*.json ./
COPY --from=deps /app/prisma ./prisma

# Copy source code (will run with tsx, not compiled)
COPY src ./src

# Copy healthcheck script, migration script, and shell entrypoint
COPY healthcheck.js ./
COPY migrate-safe.sh ./
COPY docker-entrypoint.sh /usr/local/bin/

# Create directories, fix line endings, set permissions, then remove dangerous tools
# NOTE: We keep /bin/sh because npm/npx require it to spawn processes
# The sed calls strip CRLF line endings so the scripts run even when the
# image was built from a Windows checkout.
RUN mkdir -p /app/cache/covers /app/cache/transcodes /app/logs && \
    sed -i 's/\r$//' /usr/local/bin/docker-entrypoint.sh && \
    sed -i 's/\r$//' /app/migrate-safe.sh && \
    chmod +x /usr/local/bin/docker-entrypoint.sh && \
    chmod +x /app/migrate-safe.sh && \
    chown -R node:node /app && \
    # Remove download/network utilities (prevents downloading malware)
    rm -f /usr/bin/wget /usr/bin/curl /bin/wget /bin/curl 2>/dev/null || true && \
    rm -f /usr/bin/nc /bin/nc /usr/bin/ncat /usr/bin/netcat 2>/dev/null || true && \
    rm -f /usr/bin/ftp /usr/bin/tftp /usr/bin/telnet 2>/dev/null || true

# Use non-root user
USER node

EXPOSE 3006

# Health check using Node.js (no wget needed)
# NOTE(review): healthcheck.js probes a fixed port; it must agree with the
# port the backend listens on — confirm if PORT is overridden at runtime.
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD ["node", "healthcheck.js"]

# Use tini for proper signal handling
ENTRYPOINT ["/usr/bin/tini", "--", "docker-entrypoint.sh"]
CMD ["npx", "tsx", "src/index.ts"]

```

## /backend/docker-entrypoint.sh

```sh path="/backend/docker-entrypoint.sh" 
#!/bin/sh
# Container entrypoint for the Kima backend.
# Responsibilities: refuse root, run DB migrations, regenerate the Prisma
# client, flush Redis, supply fallback secrets, then exec the CMD.
set -e

# Security check: Refuse to run as root
if [ "$(id -u)" = "0" ]; then
  echo ""
  echo "╔══════════════════════════════════════════════════════════════╗"
  echo "║  FATAL: CANNOT START AS ROOT                                 ║"
  echo "║                                                              ║"
  echo "║  Running as root is a security risk. This container must    ║"
  echo "║  run as a non-privileged user.                              ║"
  echo "║                                                              ║"
  echo "║  Do NOT use:                                                 ║"
  echo "║    - docker run --user root                                  ║"
  echo "║    - user: root in docker-compose.yml                        ║"
  echo "║                                                              ║"
  echo "║  The container is configured to run as 'node' user.         ║"
  echo "╚══════════════════════════════════════════════════════════════╝"
  echo ""
  exit 1
fi

echo "[START] Starting Kima Backend..."

# Docker Compose health checks ensure database and Redis are ready
# Add a small delay to be extra safe
echo "[WAIT] Waiting for services to be ready..."
sleep 3
echo "Services are ready"

# Run database migrations (with automatic baselining for existing databases)
# migrate-safe.sh handles Prisma's P3005 "database not empty" case; because
# of `set -e` above, a non-zero exit here aborts container startup.
echo "[DB] Running database migrations..."
sh ./migrate-safe.sh

# Generate Prisma client (in case of schema changes)
echo "[DB] Generating Prisma client..."
npx prisma generate

# Clear Redis cache on deployment to prevent stale data (e.g., 404 images)
# The inline node script's .catch() logs and resolves, so a Redis outage
# exits 0 here — the `|| echo` fallback only fires if node itself fails.
echo "[REDIS] Clearing cache for fresh deployment..."
node -e "
const { createClient } = require('redis');
const client = createClient({ url: process.env.REDIS_URL || 'redis://redis:6379' });
client.connect()
  .then(() => client.flushAll())
  .then(() => { console.log('[REDIS] Cache cleared successfully'); return client.quit(); })
  .catch(err => { console.warn('[REDIS] Cache clear failed (non-critical):', err.message); });
" || echo "[REDIS] Cache clear skipped (Redis unavailable)"

# Generate session secret if not provided
# NOTE: a generated secret invalidates all existing sessions on every
# container restart — set SESSION_SECRET explicitly for production.
if [ -z "$SESSION_SECRET" ] || [ "$SESSION_SECRET" = "changeme-generate-secure-key" ]; then
  echo "[WARN] SESSION_SECRET not set or using default. Generating random key..."
  export SESSION_SECRET=$(node -e "console.log(require('crypto').randomBytes(32).toString('base64'))")
  echo "Generated SESSION_SECRET (will not persist across restarts - set it in .env for production)"
fi

# Ensure encryption key is stable between restarts
# A fixed fallback (rather than a random one) keeps previously encrypted
# settings readable, at the cost of a well-known development key.
if [ -z "$SETTINGS_ENCRYPTION_KEY" ]; then
  echo "[WARN] SETTINGS_ENCRYPTION_KEY not set."
  echo "   Falling back to the default development key so encrypted data remains readable."
  echo "   Set SETTINGS_ENCRYPTION_KEY in your environment to a 32-character value for production."
  export SETTINGS_ENCRYPTION_KEY="default-encryption-key-change-me"
fi

echo "[START] Kima Backend starting on port ${PORT:-3006}..."
echo "[CONFIG] Music path: ${MUSIC_PATH:-/music}"
echo "[CONFIG] Environment: ${NODE_ENV:-production}"

# Execute the main command
# exec replaces this shell so signals from tini reach the node process directly.
exec "$@"

```

## /backend/healthcheck.js

```js path="/backend/healthcheck.js" 
// Minimal health check script - no external dependencies.
// Exits 0 when the backend answers /health with a 2xx/3xx status, 1 otherwise.
const http = require('http');

const options = {
  hostname: 'localhost',
  // Honor a PORT override so the health check probes the same port the
  // server listens on (docker-entrypoint.sh starts it on ${PORT:-3006});
  // previously this was hard-coded to 3006 and broke when PORT was changed.
  port: Number(process.env.PORT) || 3006,
  path: '/health',
  method: 'GET',
  timeout: 5000,
};

const req = http.request(options, (res) => {
  // Any 2xx/3xx response counts as healthy.
  process.exit(res.statusCode >= 200 && res.statusCode < 400 ? 0 : 1);
});

// Connection refused / DNS failure / server down -> unhealthy.
req.on('error', () => process.exit(1));
req.on('timeout', () => {
  // Abort the in-flight request so the process exits promptly.
  req.destroy();
  process.exit(1);
});

req.end();



```

## /backend/jest.config.js

```js path="/backend/jest.config.js" 
/**
 * Jest configuration for the backend test suite (ts-jest, Node environment).
 * @type {import('jest').Config}
 */
const config = {
    preset: 'ts-jest',
    testEnvironment: 'node',
    // Seed required environment variables before any test module loads.
    setupFiles: ['<rootDir>/src/__mocks__/test-env.cjs'],
    roots: ['<rootDir>/src'],
    testMatch: ['**/__tests__/**/*.test.ts'],
    moduleFileExtensions: ['ts', 'js', 'json'],
    clearMocks: true,
    collectCoverageFrom: ['src/**/*.ts', '!src/**/*.d.ts'],
    // These ESM-only packages must still be transformed by ts-jest.
    transformIgnorePatterns: [
        'node_modules/(?!(p-queue|eventemitter3)/)',
    ],
    moduleNameMapper: {
        // p-queue ships as pure ESM, which Jest's CJS runner cannot require().
        // Point it at a small CJS mock that runs queued functions immediately.
        '^p-queue$': '<rootDir>/src/__mocks__/p-queue.cjs',
    },
};

module.exports = config;

```

## /backend/migrate-safe.sh

```sh path="/backend/migrate-safe.sh" 
#!/bin/sh
# Safe Prisma migration runner.
# Applies pending migrations; if the database pre-dates Prisma migration
# tracking (error P3005), baselines every existing migration and retries.
set -e

echo "[MIGRATE] Starting safe migration process..."

# Check migration status. `prisma migrate status` exits non-zero when work is
# pending, so swallow the exit code and inspect the captured output instead.
MIGRATE_STATUS=$(npx prisma migrate status 2>&1 || true)

# Detect pending migrations via the marker line in the status output.
# FIX: the old `PENDING_COUNT=$(... grep -c ... || echo "0")` produced a
# two-line value when nothing matched (grep -c prints "0" AND exits 1, so the
# echo also ran), which made the comparison against "0" always fail and the
# "no pending migrations" branch unreachable. `grep -q` gives a clean boolean.
if echo "$MIGRATE_STATUS" | grep -q "Following migrations have not yet been applied:"; then
  echo "[MIGRATE] Found pending migrations, attempting to apply..."

  # Try deploy - this might fail with P3005 if database exists but isn't tracked
  DEPLOY_RESULT=$(npx prisma migrate deploy 2>&1 || echo "DEPLOY_FAILED")

  # If we got P3005 error, we need to baseline
  if echo "$DEPLOY_RESULT" | grep -q "P3005"; then
    echo ""
    echo "[MIGRATE] ⚠ Database exists but migration history is missing (P3005 error)"
    echo "[MIGRATE] This is normal for existing databases that weren't tracked with Prisma"
    echo "[MIGRATE] Baselining all migrations..."
    echo ""

    # Mark every migration directory as already applied.
    BASELINE_COUNT=0
    for migration_dir in prisma/migrations/*/; do
      if [ -d "$migration_dir" ]; then
        migration_name=$(basename "$migration_dir")

        # Skip the unexpanded glob (no matches) and the lock file.
        if [ "$migration_name" = "*" ] || [ "$migration_name" = "migration_lock.toml" ]; then
          continue
        fi

        echo "  ✓ Marking as applied: $migration_name"
        npx prisma migrate resolve --applied "$migration_name" >/dev/null 2>&1 || true
        BASELINE_COUNT=$((BASELINE_COUNT + 1))
      fi
    done

    echo ""
    echo "[MIGRATE] ✓ Baselined $BASELINE_COUNT migrations"
    echo ""

    # Now try deploy again to catch any truly new migrations
    echo "[MIGRATE] Checking for any new migrations added after baselining..."
    DEPLOY_RESULT=$(npx prisma migrate deploy 2>&1 || true)
  fi

  # Check final result
  if echo "$DEPLOY_RESULT" | grep -q "applied"; then
    APPLIED=$(echo "$DEPLOY_RESULT" | grep "applied" | head -1)
    echo "[MIGRATE] ✓ $APPLIED"
  elif echo "$DEPLOY_RESULT" | grep -q "up to date\|No pending migrations"; then
    echo "[MIGRATE] ✓ Database is up to date"
  elif echo "$DEPLOY_RESULT" | grep -q "DEPLOY_FAILED"; then
    # Non-fatal by design: startup continues and the app surfaces DB errors.
    echo "[MIGRATE] ⚠ Migration deploy encountered issues, but continuing..."
    echo "$DEPLOY_RESULT" | grep -v "DEPLOY_FAILED" | head -20
  else
    echo "[MIGRATE] Migration complete"
  fi
else
  echo "[MIGRATE] ✓ No pending migrations, database is up to date"
fi

echo ""
echo "[MIGRATE] ✓ Safe migration process complete"

```

## /backend/migrations/audio_analysis_cleanup_fix.sql

```sql path="/backend/migrations/audio_analysis_cleanup_fix.sql" 
-- Fix tracks with existing embeddings that were incorrectly marked as failed/pending/processing.
-- Rewritten from a correlated IN subquery to the equivalent (and clearer) EXISTS form.
UPDATE "Track"
SET
    "analysisStatus" = 'completed',
    "analysisError" = NULL,
    "analysisStartedAt" = NULL
WHERE EXISTS (
    SELECT 1
    FROM track_embeddings te
    WHERE te.track_id = "Track".id
)
AND "analysisStatus" IN ('failed', 'pending', 'processing');

-- Clean up stale EnrichmentFailure records for tracks that are now completed
DELETE FROM "EnrichmentFailure"
WHERE "entityType" = 'audio'
  AND EXISTS (
      SELECT 1 FROM "Track" t
      WHERE t.id = "EnrichmentFailure"."entityId"
        AND t."analysisStatus" = 'completed'
  );

-- Verify results (these queries run AFTER the fix above, so the label
-- reflects the post-fix state; the old 'Before Fix Status' label was wrong)
SELECT
    'Post-Fix Status' as info,
    "analysisStatus",
    COUNT(*) as count
FROM "Track"
GROUP BY "analysisStatus";

SELECT
    'Remaining EnrichmentFailures (audio)' as info,
    COUNT(*) as count
FROM "EnrichmentFailure"
WHERE "entityType" = 'audio';

```

## /backend/package.json

```json path="/backend/package.json" 
{
    "name": "kima-backend",
    "version": "1.7.11",
    "description": "Kima backend API server",
    "license": "GPL-3.0",
    "repository": {
        "type": "git",
        "url": "https://github.com/Chevron7Locked/kima-hub.git"
    },
    "scripts": {
        "dev": "tsx watch src/index.ts",
        "prebuild": "rm -rf dist",
        "build": "tsc",
        "start": "node dist/index.js",
        "db:migrate": "prisma migrate deploy",
        "db:studio": "prisma studio",
        "seed:user": "tsx seeds/createUser.ts",
        "typecheck": "tsc --noEmit",
        "test": "jest",
        "test:smoke": "tsx scripts/smoke.ts",
        "sync": "tsx src/workers/sync.ts",
        "create:testuser": "tsx scripts/create-test-user.ts",
        "generate:vocabulary": "tsx scripts/generateVibeVocabulary.ts"
    },
    "dependencies": {
        "@bull-board/api": "^6.20.3",
        "@bull-board/express": "^6.20.3",
        "@ffmpeg-installer/ffmpeg": "^1.1.0",
        "@prisma/client": "^5.22.0",
        "axios": "^1.13.6",
        "bcrypt": "^6.0.0",
        "bullmq": "^5.70.1",
        "connect-redis": "^7.1.0",
        "cors": "^2.8.5",
        "date-fns": "^4.1.0",
        "dotenv": "^16.3.1",
        "express": "^4.18.2",
        "express-rate-limit": "^8.2.1",
        "express-session": "^1.17.3",
        "fast-xml-parser": "^5.4.1",
        "fluent-ffmpeg": "^2.1.3",
        "fuzzball": "^2.2.3",
        "helmet": "^7.1.0",
        "ioredis": "^5.10.0",
        "jsonwebtoken": "^9.0.2",
        "multer": "^2.1.0",
        "music-metadata": "^11.10.0",
        "node-cron": "^4.2.1",
        "p-limit": "^7.2.0",
        "p-queue": "^9.0.0",
        "podcast-index-api": "^1.1.10",
        "prom-client": "^15.1.3",
        "qrcode": "^1.5.4",
        "rss-parser": "^3.13.0",
        "sharp": "^0.34.5",
        "speakeasy": "^2.0.0",
        "typed-emitter": "^2.1.0",
        "umap-js": "^1.4.0",
        "zod": "^3.22.4"
    },
    "devDependencies": {
        "@types/bcrypt": "^5.0.2",
        "@types/cors": "^2.8.19",
        "@types/express": "^4.17.21",
        "@types/express-session": "^1.17.10",
        "@types/fluent-ffmpeg": "^2.1.28",
        "@types/jest": "^30.0.0",
        "@types/jsonwebtoken": "^9.0.10",
        "@types/multer": "^2.0.0",
        "@types/node": "^20.10.4",
        "@types/node-cron": "^3.0.11",
        "@types/qrcode": "^1.5.6",
        "@types/speakeasy": "^2.0.10",
        "@types/supertest": "^7.2.0",
        "jest": "^30.2.0",
        "prisma": "^5.22.0",
        "supertest": "^7.2.2",
        "ts-jest": "^29.4.6",
        "tsx": "^4.7.0",
        "typescript": "^5.3.3"
    }
}

```

## /backend/prisma/migrations/20250101000000_rename_soulseek_fallback/migration.sql

```sql path="/backend/prisma/migrations/20250101000000_rename_soulseek_fallback/migration.sql" 
-- Rename soulseekFallback to primaryFailureFallback (idempotent)
-- Guarded via information_schema so re-running this migration (or running it
-- against a database that never had the old column) is a harmless no-op
-- instead of a hard RENAME failure.
DO $$
BEGIN
    IF EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'SystemSettings' AND column_name = 'soulseekFallback'
    ) THEN
        ALTER TABLE "SystemSettings" RENAME COLUMN "soulseekFallback" TO "primaryFailureFallback";
    END IF;
END $$;

```

## /backend/prisma/migrations/20250102000000_add_user_token_version/migration.sql

```sql path="/backend/prisma/migrations/20250102000000_add_user_token_version/migration.sql" 
-- Add tokenVersion to User table (idempotent)
-- Presumably used to invalidate previously issued auth tokens by bumping the
-- version — verify against the auth middleware before relying on this.
-- The tables check makes the migration safe on databases where "User" does not
-- exist yet; the columns check makes it safe to re-run.
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'User')
    AND NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'User' AND column_name = 'tokenVersion'
    ) THEN
        -- DEFAULT 0 backfills existing rows so NOT NULL is satisfied.
        ALTER TABLE "User" ADD COLUMN "tokenVersion" INTEGER NOT NULL DEFAULT 0;
    END IF;
END $$;

```

## /backend/prisma/migrations/20250102000001_add_downloadjob_targetmbid_index/migration.sql

```sql path="/backend/prisma/migrations/20250102000001_add_downloadjob_targetmbid_index/migration.sql" 
-- Create targetMbid index on DownloadJob (idempotent)
-- Speeds up lookups of download jobs by MusicBrainz release id. Guarded with
-- pg_indexes / information_schema checks so re-running is a no-op.
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'DownloadJob')
    AND NOT EXISTS (
        SELECT 1 FROM pg_indexes
        WHERE tablename = 'DownloadJob' AND indexname = 'DownloadJob_targetMbid_idx'
    ) THEN
        CREATE INDEX "DownloadJob_targetMbid_idx" ON "DownloadJob"("targetMbid");
    END IF;
END $$;

```

## /backend/prisma/migrations/20251225000000_add_missing_track_updated_at/migration.sql

```sql path="/backend/prisma/migrations/20251225000000_add_missing_track_updated_at/migration.sql" 
-- Add updatedAt column to Track if it doesn't exist
-- This handles databases that were created before this column was added to the schema

DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'Track' AND column_name = 'updatedAt'
    ) THEN
        ALTER TABLE "Track" ADD COLUMN "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP;
    END IF;
END $$;

```

## /backend/prisma/migrations/20251225100000_add_similar_artists_json/migration.sql

```sql path="/backend/prisma/migrations/20251225100000_add_similar_artists_json/migration.sql" 
-- AddColumn: Artist.similarArtistsJson
-- Stores full Last.fm similar artists data as JSON instead of FK-based SimilarArtist table
-- Nullable on purpose: existing artists simply have no cached similar-artist data yet.
ALTER TABLE "Artist" ADD COLUMN "similarArtistsJson" JSONB;

```

## /backend/prisma/migrations/20251226000000_add_mood_bucket_system/migration.sql

```sql path="/backend/prisma/migrations/20251226000000_add_mood_bucket_system/migration.sql" 
-- Add MoodBucket table for pre-computed mood assignments
-- One row per (track, mood) pairing with a relevance score; rows cascade-delete
-- with their Track.
CREATE TABLE "MoodBucket" (
    "id" TEXT NOT NULL,
    "trackId" TEXT NOT NULL,
    "mood" TEXT NOT NULL,
    "score" DOUBLE PRECISION NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- No DEFAULT: updatedAt is expected to be supplied by the application layer.
    "updatedAt" TIMESTAMP(3) NOT NULL,

    CONSTRAINT "MoodBucket_pkey" PRIMARY KEY ("id")
);

-- Add UserMoodMix table for storing user's active mood mix
-- Track ids and cover URLs are denormalized into arrays for fast playback reads.
CREATE TABLE "UserMoodMix" (
    "id" TEXT NOT NULL,
    "userId" TEXT NOT NULL,
    "mood" TEXT NOT NULL,
    "trackIds" TEXT[],
    "coverUrls" TEXT[],
    "generatedAt" TIMESTAMP(3) NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,

    CONSTRAINT "UserMoodMix_pkey" PRIMARY KEY ("id")
);

-- Unique constraint: one entry per track+mood combination
CREATE UNIQUE INDEX "MoodBucket_trackId_mood_key" ON "MoodBucket"("trackId", "mood");

-- Index for fast mood lookups sorted by score
CREATE INDEX "MoodBucket_mood_score_idx" ON "MoodBucket"("mood", "score" DESC);

-- Index for track lookups
CREATE INDEX "MoodBucket_trackId_idx" ON "MoodBucket"("trackId");

-- Unique constraint: one mood mix per user
CREATE UNIQUE INDEX "UserMoodMix_userId_key" ON "UserMoodMix"("userId");

-- Index for user lookups
-- NOTE(review): redundant — the unique index above already covers userId
-- lookups. Harmless but could be dropped in a future migration.
CREATE INDEX "UserMoodMix_userId_idx" ON "UserMoodMix"("userId");

-- Foreign key constraints
ALTER TABLE "MoodBucket" ADD CONSTRAINT "MoodBucket_trackId_fkey" FOREIGN KEY ("trackId") REFERENCES "Track"("id") ON DELETE CASCADE ON UPDATE CASCADE;

ALTER TABLE "UserMoodMix" ADD CONSTRAINT "UserMoodMix_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- Add indexes to Track table for mood-related columns (for query optimization)
-- IF NOT EXISTS keeps these safe to re-run on partially migrated databases.
CREATE INDEX IF NOT EXISTS "Track_analysisMode_idx" ON "Track"("analysisMode");
CREATE INDEX IF NOT EXISTS "Track_moodHappy_idx" ON "Track"("moodHappy");
CREATE INDEX IF NOT EXISTS "Track_moodSad_idx" ON "Track"("moodSad");
CREATE INDEX IF NOT EXISTS "Track_moodRelaxed_idx" ON "Track"("moodRelaxed");
CREATE INDEX IF NOT EXISTS "Track_moodAggressive_idx" ON "Track"("moodAggressive");
CREATE INDEX IF NOT EXISTS "Track_moodParty_idx" ON "Track"("moodParty");
CREATE INDEX IF NOT EXISTS "Track_moodAcoustic_idx" ON "Track"("moodAcoustic");
CREATE INDEX IF NOT EXISTS "Track_moodElectronic_idx" ON "Track"("moodElectronic");
CREATE INDEX IF NOT EXISTS "Track_arousal_idx" ON "Track"("arousal");
CREATE INDEX IF NOT EXISTS "Track_acousticness_idx" ON "Track"("acousticness");
CREATE INDEX IF NOT EXISTS "Track_instrumentalness_idx" ON "Track"("instrumentalness");

```

## /backend/prisma/migrations/20251229004706_add_enrichment_concurrency/migration.sql

```sql path="/backend/prisma/migrations/20251229004706_add_enrichment_concurrency/migration.sql" 
-- AlterTable
-- Concurrency setting for the enrichment pipeline; DEFAULT 1 backfills existing
-- rows with sequential (single-worker) behavior.
ALTER TABLE "SystemSettings" ADD COLUMN     "enrichmentConcurrency" INTEGER NOT NULL DEFAULT 1;

```

## /backend/prisma/migrations/20251229043907_add_metadata_overrides/migration.sql

```sql path="/backend/prisma/migrations/20251229043907_add_metadata_overrides/migration.sql" 
-- AlterTable
-- User-editable metadata overrides for albums. All columns are nullable or
-- default to false so existing rows are untouched; hasUserOverrides flags
-- records with at least one manual edit.
ALTER TABLE "Album" ADD COLUMN     "displayTitle" TEXT,
ADD COLUMN     "displayYear" INTEGER,
ADD COLUMN     "hasUserOverrides" BOOLEAN NOT NULL DEFAULT false,
ADD COLUMN     "userCoverUrl" TEXT,
ADD COLUMN     "userGenres" JSONB;

-- AlterTable
-- Same override pattern for artists (name, genres, hero image, summary).
ALTER TABLE "Artist" ADD COLUMN     "displayName" TEXT,
ADD COLUMN     "hasUserOverrides" BOOLEAN NOT NULL DEFAULT false,
ADD COLUMN     "userGenres" JSONB,
ADD COLUMN     "userHeroUrl" TEXT,
ADD COLUMN     "userSummary" TEXT;

-- AlterTable
-- Same override pattern for tracks (title, track number).
ALTER TABLE "Track" ADD COLUMN     "displayTitle" TEXT,
ADD COLUMN     "displayTrackNo" INTEGER,
ADD COLUMN     "hasUserOverrides" BOOLEAN NOT NULL DEFAULT false;

-- CreateIndex
-- Boolean indexes below support "find all user-edited records" queries.
CREATE INDEX "Album_hasUserOverrides_idx" ON "Album"("hasUserOverrides");

-- CreateIndex
CREATE INDEX "Artist_hasUserOverrides_idx" ON "Artist"("hasUserOverrides");

-- CreateIndex
CREATE INDEX "Track_hasUserOverrides_idx" ON "Track"("hasUserOverrides");

```

## /backend/prisma/migrations/20251230000000_add_podcast_audiobook_search_vectors/migration.sql

```sql path="/backend/prisma/migrations/20251230000000_add_podcast_audiobook_search_vectors/migration.sql" 
-- Migration: Add search vector triggers for podcasts and audiobooks
-- This migration creates PostgreSQL functions and triggers to automatically
-- populate and maintain search vectors for podcast and audiobook content
--
-- NOTE(review): the triggers are created before the "searchVector" columns are
-- added further down in this file. That works because plpgsql function bodies
-- are not validated against table columns at CREATE time, and the whole
-- migration runs before any row is inserted/updated — but the ordering is
-- worth confirming if this file is ever split.

-- ============================================================================
-- PODCAST SEARCH VECTOR FUNCTION
-- ============================================================================
-- Function to generate Podcast search vector from title, author, and description
CREATE OR REPLACE FUNCTION podcast_search_vector_trigger() RETURNS trigger AS $$
BEGIN
  -- Combine title, author, and description into search vector
  -- Using setweight: title (A), author (B), description (C)
  -- COALESCE guards against NULL fields, which would otherwise NULL the whole vector
  NEW."searchVector" := 
    setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') ||
    setweight(to_tsvector('english', COALESCE(NEW.author, '')), 'B') ||
    setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'C');
  
  RETURN NEW;
END
$$ LANGUAGE plpgsql;

-- Create trigger to auto-update Podcast search vector
-- Fires only when one of the source columns changes (UPDATE OF ...), so direct
-- writes to "searchVector" itself do not re-trigger it.
DROP TRIGGER IF EXISTS podcast_search_vector_update ON "Podcast";
CREATE TRIGGER podcast_search_vector_update
  BEFORE INSERT OR UPDATE OF title, author, description
  ON "Podcast"
  FOR EACH ROW
  EXECUTE FUNCTION podcast_search_vector_trigger();

-- ============================================================================
-- PODCAST EPISODE SEARCH VECTOR FUNCTION
-- ============================================================================
-- Function to generate PodcastEpisode search vector from title and description
CREATE OR REPLACE FUNCTION podcast_episode_search_vector_trigger() RETURNS trigger AS $$
BEGIN
  -- Combine title and description into search vector
  -- Using setweight: title (A), description (B)
  NEW."searchVector" := 
    setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') ||
    setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'B');
  
  RETURN NEW;
END
$$ LANGUAGE plpgsql;

-- Create trigger to auto-update PodcastEpisode search vector
DROP TRIGGER IF EXISTS podcast_episode_search_vector_update ON "PodcastEpisode";
CREATE TRIGGER podcast_episode_search_vector_update
  BEFORE INSERT OR UPDATE OF title, description
  ON "PodcastEpisode"
  FOR EACH ROW
  EXECUTE FUNCTION podcast_episode_search_vector_trigger();

-- ============================================================================
-- AUDIOBOOK SEARCH VECTOR FUNCTION
-- ============================================================================
-- Function to generate Audiobook search vector from title, author, narrator, series, and description
CREATE OR REPLACE FUNCTION audiobook_search_vector_trigger() RETURNS trigger AS $$
BEGIN
  -- Combine title, author/narrator/series, and description into search vector
  -- Using setweight: title (A), author/narrator/series (B), description (C)
  NEW."searchVector" := 
    setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') ||
    setweight(to_tsvector('english', COALESCE(NEW.author, '')), 'B') ||
    setweight(to_tsvector('english', COALESCE(NEW.narrator, '')), 'B') ||
    setweight(to_tsvector('english', COALESCE(NEW.series, '')), 'B') ||
    setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'C');
  
  RETURN NEW;
END
$$ LANGUAGE plpgsql;

-- Create trigger to auto-update Audiobook search vector
DROP TRIGGER IF EXISTS audiobook_search_vector_update ON "Audiobook";
CREATE TRIGGER audiobook_search_vector_update
  BEFORE INSERT OR UPDATE OF title, author, narrator, series, description
  ON "Audiobook"
  FOR EACH ROW
  EXECUTE FUNCTION audiobook_search_vector_trigger();

-- ============================================================================
-- ADD SEARCH VECTOR COLUMNS
-- ============================================================================
-- IF NOT EXISTS keeps these safe to re-run against databases that already
-- received the column out-of-band.
-- Add searchVector column to Podcast table
ALTER TABLE "Podcast" ADD COLUMN IF NOT EXISTS "searchVector" tsvector;

-- Add searchVector column to PodcastEpisode table
ALTER TABLE "PodcastEpisode" ADD COLUMN IF NOT EXISTS "searchVector" tsvector;

-- Add searchVector column to Audiobook table
ALTER TABLE "Audiobook" ADD COLUMN IF NOT EXISTS "searchVector" tsvector;

-- ============================================================================
-- CREATE GIN INDEXES
-- ============================================================================
-- GIN is the standard index type for tsvector full-text search.
-- Create GIN index on Podcast search vector
CREATE INDEX IF NOT EXISTS "Podcast_searchVector_idx" ON "Podcast" USING GIN ("searchVector");

-- Create GIN index on PodcastEpisode search vector
CREATE INDEX IF NOT EXISTS "PodcastEpisode_searchVector_idx" ON "PodcastEpisode" USING GIN ("searchVector");

-- Create GIN index on Audiobook search vector
CREATE INDEX IF NOT EXISTS "Audiobook_searchVector_idx" ON "Audiobook" USING GIN ("searchVector");

-- ============================================================================
-- POPULATE EXISTING RECORDS
-- ============================================================================
-- These backfills duplicate the trigger-function logic because the triggers
-- only fire on changes to the source text columns, not on direct writes to
-- "searchVector" — keep the weights here in sync with the functions above.
-- Update all existing Podcasts to populate their search vectors
UPDATE "Podcast"
SET "searchVector" = 
  setweight(to_tsvector('english', COALESCE(title, '')), 'A') ||
  setweight(to_tsvector('english', COALESCE(author, '')), 'B') ||
  setweight(to_tsvector('english', COALESCE(description, '')), 'C');

-- Update all existing PodcastEpisodes to populate their search vectors
UPDATE "PodcastEpisode"
SET "searchVector" = 
  setweight(to_tsvector('english', COALESCE(title, '')), 'A') ||
  setweight(to_tsvector('english', COALESCE(description, '')), 'B');

-- Update all existing Audiobooks to populate their search vectors
UPDATE "Audiobook"
SET "searchVector" = 
  setweight(to_tsvector('english', COALESCE(title, '')), 'A') ||
  setweight(to_tsvector('english', COALESCE(author, '')), 'B') ||
  setweight(to_tsvector('english', COALESCE(narrator, '')), 'B') ||
  setweight(to_tsvector('english', COALESCE(series, '')), 'B') ||
  setweight(to_tsvector('english', COALESCE(description, '')), 'C');

```

## /backend/prisma/migrations/20251230234224_add_enrichment_and_overrides/migration.sql

```sql path="/backend/prisma/migrations/20251230234224_add_enrichment_and_overrides/migration.sql" 
-- CreateTable
-- Tracks metadata-enrichment failures per entity so retries can be bounded
-- (retryCount/maxRetries) and failures can be manually skipped or resolved.
CREATE TABLE "EnrichmentFailure" (
    "id" TEXT NOT NULL,
    "entityType" TEXT NOT NULL,
    "entityId" TEXT NOT NULL,
    "entityName" TEXT,
    "errorMessage" TEXT,
    "errorCode" TEXT,
    "retryCount" INTEGER NOT NULL DEFAULT 0,
    "maxRetries" INTEGER NOT NULL DEFAULT 3,
    "firstFailedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "lastFailedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "skipped" BOOLEAN NOT NULL DEFAULT false,
    "skippedAt" TIMESTAMP(3),
    "resolved" BOOLEAN NOT NULL DEFAULT false,
    "resolvedAt" TIMESTAMP(3),
    "metadata" JSONB,

    CONSTRAINT "EnrichmentFailure_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
-- Supports "unresolved failures of type X" queries.
CREATE INDEX "EnrichmentFailure_entityType_resolved_idx" ON "EnrichmentFailure"("entityType", "resolved");

-- CreateIndex
CREATE INDEX "EnrichmentFailure_skipped_idx" ON "EnrichmentFailure"("skipped");

-- CreateIndex
CREATE INDEX "EnrichmentFailure_lastFailedAt_idx" ON "EnrichmentFailure"("lastFailedAt");

-- CreateIndex
-- One failure record per entity; upserted on repeated failures.
CREATE UNIQUE INDEX "EnrichmentFailure_entityType_entityId_key" ON "EnrichmentFailure"("entityType", "entityId");

```

## /backend/prisma/migrations/20251231041041_add_original_year_to_album/migration.sql

```sql path="/backend/prisma/migrations/20251231041041_add_original_year_to_album/migration.sql" 
-- AlterTable
-- Original release year (nullable; distinct from reissue year on the album).
ALTER TABLE "Album" ADD COLUMN     "originalYear" INTEGER;

```

## /backend/prisma/migrations/20260101152925_add_lidarr_webhook_secret/migration.sql

```sql path="/backend/prisma/migrations/20260101152925_add_lidarr_webhook_secret/migration.sql" 
-- AlterTable
-- Shared secret for authenticating incoming Lidarr webhooks (nullable = unset).
ALTER TABLE "SystemSettings" ADD COLUMN "lidarrWebhookSecret" TEXT;

```

## /backend/prisma/migrations/20260102142537_add_analysis_started_at/migration.sql

```sql path="/backend/prisma/migrations/20260102142537_add_analysis_started_at/migration.sql" 
-- AlterTable
-- Timestamp when audio analysis began for a track; NULL when not started or
-- cleared (see migrations/audio_analysis_cleanup_fix.sql for its reset path).
ALTER TABLE "Track" ADD COLUMN "analysisStartedAt" TIMESTAMP(3);

```

## /backend/prisma/migrations/20260102150000_add_audio_analyzer_workers/migration.sql

```sql path="/backend/prisma/migrations/20260102150000_add_audio_analyzer_workers/migration.sql" 
-- AlterTable
-- Number of audio analyzer workers; DEFAULT 2 backfills existing rows.
ALTER TABLE "SystemSettings" ADD COLUMN     "audioAnalyzerWorkers" INTEGER NOT NULL DEFAULT 2;

```

## /backend/prisma/migrations/20260103045951_add_lastfm_api_key/migration.sql

```sql path="/backend/prisma/migrations/20260103045951_add_lastfm_api_key/migration.sql" 
-- AlterTable
-- Last.fm API key (nullable = integration not configured).
ALTER TABLE "SystemSettings" ADD COLUMN     "lastfmApiKey" TEXT;

```

## /backend/prisma/migrations/20260104000000_add_soulseek_concurrent_downloads/migration.sql

```sql path="/backend/prisma/migrations/20260104000000_add_soulseek_concurrent_downloads/migration.sql" 
-- AlterTable
-- Max simultaneous Soulseek downloads; DEFAULT 4 backfills existing rows.
ALTER TABLE "SystemSettings" ADD COLUMN     "soulseekConcurrentDownloads" INTEGER NOT NULL DEFAULT 4;

```

## /backend/prisma/migrations/20260107000000_add_download_source_columns/migration.sql

```sql path="/backend/prisma/migrations/20260107000000_add_download_source_columns/migration.sql" 
-- Add downloadSource column if it doesn't exist (idempotent)
-- 'soulseek' default preserves the pre-existing behavior for old installs.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'SystemSettings' AND column_name = 'downloadSource'
    ) THEN
        ALTER TABLE "SystemSettings" ADD COLUMN "downloadSource" TEXT NOT NULL DEFAULT 'soulseek';
    END IF;
END $$;

-- Add primaryFailureFallback column if it doesn't exist (idempotent)
-- Also covers databases where the 20250101000000 rename migration already
-- created this column, hence the existence guard.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'SystemSettings' AND column_name = 'primaryFailureFallback'
    ) THEN
        ALTER TABLE "SystemSettings" ADD COLUMN "primaryFailureFallback" TEXT NOT NULL DEFAULT 'none';
    END IF;
END $$;

```

## /backend/prisma/migrations/20260118000000_add_partial_unique_index_active_downloads/migration.sql

```sql path="/backend/prisma/migrations/20260118000000_add_partial_unique_index_active_downloads/migration.sql" 
-- CreateIndex
-- Partial unique index to prevent duplicate active download jobs for the same album
-- This only applies to 'pending' and 'processing' statuses, allowing multiple
-- completed/failed/exhausted downloads for the same MBID
-- Note: Prisma doesn't natively support partial indexes, so this is a raw SQL migration

CREATE UNIQUE INDEX IF NOT EXISTS "DownloadJob_targetMbid_active_unique"
ON "DownloadJob" ("targetMbid")
WHERE status IN ('pending', 'processing');

-- Add a comment explaining the index purpose
COMMENT ON INDEX "DownloadJob_targetMbid_active_unique" IS
    'Prevents duplicate download jobs for the same album when status is pending or processing. Allows multiple completed/failed downloads for retry scenarios.';

```

## /backend/prisma/migrations/20260123181610_add_artist_counts_and_indexes/migration.sql

```sql path="/backend/prisma/migrations/20260123181610_add_artist_counts_and_indexes/migration.sql" 
-- AlterTable
-- Denormalized per-artist counters (refreshed at countsLastUpdated) so list
-- views don't need COUNT aggregations; defaults of 0 backfill existing rows.
ALTER TABLE "Artist" ADD COLUMN     "countsLastUpdated" TIMESTAMP(3),
ADD COLUMN     "discoveryAlbumCount" INTEGER NOT NULL DEFAULT 0,
ADD COLUMN     "libraryAlbumCount" INTEGER NOT NULL DEFAULT 0,
ADD COLUMN     "totalTrackCount" INTEGER NOT NULL DEFAULT 0;

-- CreateIndex
-- Composite index for filtering an artist's albums by location.
CREATE INDEX "Album_artistId_location_idx" ON "Album"("artistId", "location");

-- CreateIndex
-- The count indexes below support sorting/filtering artists by these counters.
CREATE INDEX "Artist_libraryAlbumCount_idx" ON "Artist"("libraryAlbumCount");

-- CreateIndex
CREATE INDEX "Artist_discoveryAlbumCount_idx" ON "Artist"("discoveryAlbumCount");

-- CreateIndex
CREATE INDEX "Artist_totalTrackCount_idx" ON "Artist"("totalTrackCount");

```

## /backend/prisma/migrations/20260127000000_add_pgvector/migration.sql

```sql path="/backend/prisma/migrations/20260127000000_add_pgvector/migration.sql" 
-- Enable pgvector extension for vector similarity search
-- (requires the pgvector extension to be installed on the server image)
CREATE EXTENSION IF NOT EXISTS vector;

-- Create track_embeddings table for storing CLAP embeddings
-- One embedding per track (track_id is the primary key); vector(1024) matches
-- the output dimension of the default model below.
CREATE TABLE "track_embeddings" (
    "track_id" TEXT NOT NULL,
    "embedding" vector(1024) NOT NULL,
    "model_version" VARCHAR(50) NOT NULL DEFAULT 'laion-clap-music',
    "analyzed_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,

    CONSTRAINT "track_embeddings_pkey" PRIMARY KEY ("track_id")
);

-- Foreign key constraint with CASCADE delete
-- Deleting a Track automatically removes its embedding.
ALTER TABLE "track_embeddings" ADD CONSTRAINT "track_embeddings_track_id_fkey"
    FOREIGN KEY ("track_id") REFERENCES "Track"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- IVFFlat index for approximate nearest neighbor search
-- lists = 224 (sqrt of 50k target tracks)
-- NOTE(review): ivfflat builds its clusters from the rows present at CREATE
-- time; since this table is created empty, consider reindexing once populated.
CREATE INDEX "track_embeddings_embedding_idx" ON "track_embeddings"
    USING ivfflat ("embedding" vector_cosine_ops) WITH (lists = 224);

-- Index on model_version for filtering by embedding version
CREATE INDEX "track_embeddings_model_version_idx" ON "track_embeddings"("model_version");

```

## /backend/prisma/migrations/20260128000000_add_clap_workers/migration.sql

```sql path="/backend/prisma/migrations/20260128000000_add_clap_workers/migration.sql" 
-- AlterTable
-- Number of CLAP embedding workers; DEFAULT 2 backfills existing rows.
ALTER TABLE "SystemSettings" ADD COLUMN "clapWorkers" INTEGER NOT NULL DEFAULT 2;

```

## /backend/prisma/migrations/20260214145320_standardize_integration_config/migration.sql

```sql path="/backend/prisma/migrations/20260214145320_standardize_integration_config/migration.sql" 
-- AlterTable
-- Per-integration enable flags and extra credentials. Columns are nullable
-- (tri-state) so "never configured" is distinguishable from explicit false.
ALTER TABLE "SystemSettings" ADD COLUMN "soulseekEnabled" BOOLEAN,
ADD COLUMN "soulseekDownloadPath" TEXT,
ADD COLUMN "lastfmApiSecret" TEXT,
ADD COLUMN "lastfmUserKey" TEXT,
ADD COLUMN "lastfmEnabled" BOOLEAN;

-- Auto-enable Soulseek for existing installations with credentials
-- (a non-empty soulseekUsername implies the user had Soulseek working before
-- the explicit flag existed).
UPDATE "SystemSettings" SET "soulseekEnabled" = true WHERE "soulseekUsername" IS NOT NULL AND "soulseekUsername" != '';

```

## /backend/src/lib/soulseek/common.ts

```ts path="/backend/src/lib/soulseek/common.ts" 
export type Address = { host: string; port: number }

```


The content has been capped at 50000 tokens. The user could consider applying other filters to refine the result. The better and more specific the context, the better the LLM can follow instructions. If the context seems verbose, the user can refine the filter using uithub. Thank you for using https://uithub.com - Perfect LLM context for any GitHub repo.