``` ├── .gitattributes ├── .github/ ├── ISSUE_TEMPLATE/ ├── bug-report.yaml ├── feature_request.md ├── other-issues.md ├── workflows/ ├── build.yaml ├── .gitignore ├── .metadata ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── README_tr.md ├── README_zh.md ├── analysis_options.yaml ├── android/ ├── .gitignore ├── app/ ├── build.gradle ├── src/ ├── debug/ ├── AndroidManifest.xml ├── main/ ├── AndroidManifest.xml ├── kotlin/ ├── com/ ├── anxcye/ ├── anx_reader/ ├── MainActivity.kt ├── example/ ├── anx_reader/ ├── MainActivity.kt ├── res/ ├── drawable-v21/ ├── launch_background.xml ├── drawable/ ├── launch_background.xml ├── mipmap-hdpi/ ├── ic_launcher.png ├── launcher_icon.png ├── mipmap-mdpi/ ├── ic_launcher.png ├── launcher_icon.png ├── mipmap-xhdpi/ ├── ic_launcher.png ├── launcher_icon.png ├── mipmap-xxhdpi/ ├── ic_launcher.png ├── launcher_icon.png ├── mipmap-xxxhdpi/ ├── ic_launcher.png ├── launcher_icon.png ├── values-night/ ├── styles.xml ├── values-zh/ ├── strings.xml ├── values/ ├── strings.xml ├── styles.xml ├── xml/ ├── network_security_config.xml ├── profile/ ├── AndroidManifest.xml ├── build.gradle ├── gradle.properties ├── gradle/ ├── wrapper/ ├── gradle-wrapper.properties ├── settings.gradle ├── assets/ ├── .gitignore ├── foliate-js/ ├── .gitattributes ├── .gitignore ├── README.md ├── book.js ├── comic-book.js ├── dict.js ├── epub.js ├── epubcfi.js ├── eslint.config.js ├── fb2.js ├── fixed-layout.js ├── footnotes.js ├── index.html ├── mobi.js ├── opds.js ``` ## /.gitattributes ```gitattributes path="/.gitattributes" *.js linguist-detectable=false # ignore diff for generated files *.gen.dart binary *.gen.dart linguist-generated=true ``` ## /.github/ISSUE_TEMPLATE/bug-report.yaml ```yaml path="/.github/ISSUE_TEMPLATE/bug-report.yaml" name: Bug report description: Report a bug!|我遇到了问题! title: "[Bug]: " labels: ["bug"] body: - type: checkboxes id: bug_report_is_crash attributes: label: Prerequisites|前提条件 options: - label: I have checked the [troubleshooting](https://github.com/Anxcye/anx-reader/blob/develop/docs/troubleshooting.md)|我已经查看过[故障排除](https://github.com/Anxcye/anx-reader/blob/develop/docs/troubleshooting.md) required: true - label: I have searched for similar issues and did not find any|我已经搜索过没有相同的问题 required: true - type: textarea id: bug_report_description attributes: label: Describe the bug|描述问题 description: | A clear and concise description of what the bug is 一个清晰且简洁的描述问题 validations: required: true - type: textarea id: bug_report_reproduce attributes: label: To reproduce|重现步骤 description: | Steps to reproduce the behavior 如何重现问题 value: | 1. Go to '...' | 前往... 2. Click on '...' | 点击... 3. Scroll down to '...' | 向下滚动到... 4. See error | 看到错误 validations: required: true - type: textarea id: bug_report_expected_behavior attributes: label: Expected behavior|预期行为 description: | A clear and concise description of what you expected to happen 一个清晰且简洁的描述你期望发生的事情 validations: required: true - type: textarea id: bug_report_screenshots attributes: label: Screenshots|截图 description: | If applicable, add screenshots to help explain your problem Tip: You can attach images by clicking this area to highlight it and then dragging files in. 如果可以,添加截图以帮助解释问题 提示:你可以通过点击此区域来高亮它,然后拖动文件来添加图片。 - type: textarea id: bug_report_desktop attributes: label: Platform (please complete the following information)|平台(请填写以下信息) value: | - Platform: | 平台: [e.g. Android] - OS: | 操作系统: [e.g. Android13(MIUI14)] - AnxReader Version: | 安读版本: [e.g. 1.2.0+2033] - Device: | 设备: [e.g. 
Xiaomi 13] validations: required: true - type: textarea id: bug_report_additional_context attributes: label: Additional context|其他信息 description: | Add any other context about the problem here, e.g. logs 添加任何其他关于问题的信息,如日志等 ``` ## /.github/ISSUE_TEMPLATE/feature_request.md --- name: Feature request about: Suggest an idea! title: '' labels: enhancement assignees: '' --- ## /.github/ISSUE_TEMPLATE/other-issues.md --- name: Other issues about: Other issues. title: '' labels: '' assignees: '' --- ## /.github/workflows/build.yaml ```yaml path="/.github/workflows/build.yaml" name: Build & Release on: push: tags: - 'v*' - 'beta-*' - 'alpha-*' permissions: contents: write jobs: build: runs-on: ${{ matrix.os }} strategy: matrix: include: - platform: android os: ubuntu-latest - platform: windows os: windows-latest - platform: macos os: macos-latest - platform: ios os: macos-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Get version from pubspec.yaml and determine release type id: get_version shell: bash run: | VERSION=$( grep '^version:' pubspec.yaml | cut -d ' ' -f 2 | cut -d '+' -f 1 ) if [[ ${{ github.ref }} == refs/tags/alpha-* ]]; then echo "IS_ALPHA=true" >> $GITHUB_ENV echo "IS_BETA=false" >> $GITHUB_ENV echo "VERSION=alpha-${VERSION}" >> $GITHUB_ENV elif [[ ${{ github.ref }} == refs/tags/beta-* ]]; then echo "IS_ALPHA=false" >> $GITHUB_ENV echo "IS_BETA=true" >> $GITHUB_ENV echo "VERSION=beta-${VERSION}" >> $GITHUB_ENV else echo "IS_ALPHA=false" >> $GITHUB_ENV echo "IS_BETA=false" >> $GITHUB_ENV echo "VERSION=${VERSION}" >> $GITHUB_ENV fi - name: Set up Flutter uses: subosito/flutter-action@v2 with: channel: stable flutter-version: '3.29.0' - name: Get Dependencies run: | flutter --version flutter pub get - name: Setup Java if: matrix.platform == 'android' uses: actions/setup-java@v4 with: distribution: 'zulu' java-version: '17' cache: 'gradle' check-latest: true # - name: Setup for Windows # if: matrix.platform == 'windows' # uses: msys2/setup-msys2@v2 # with: # msystem: mingw64 # install: mingw-w64-x86_64-gcc # update: true # - name: Set Mingw64 Env # if: matrix.platform == 'windows' # shell: bash # run: | # echo "${{ runner.temp }}\msys64\mingw64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append # gcc -v - name: Setup Android signing if: matrix.platform == 'android' run: | echo "${{ secrets.KEYSTORE_BASE64 }}" | base64 --decode > android/app/keystore.jks echo "storePassword=${{ secrets.KEYSTORE_PASSWORD }}" > android/key.properties echo "keyPassword=${{ secrets.KEY_PASSWORD }}" >> android/key.properties echo "keyAlias=${{ secrets.KEY_ALIAS }}" >> android/key.properties echo "storeFile=keystore.jks" >> android/key.properties - name: Prepare for Windows if: matrix.platform == 'windows' shell: bash run: | flutter config --enable-windows-desktop sed -i "1i #define MyAppVersion \"${{ env.VERSION }}\"" scripts/compile_windows_setup-inno.iss - name: Prepare for macOS if: matrix.platform == 'macos' shell: bash run: | flutter config --enable-macos-desktop - name: Build for Android if: matrix.platform == 'android' run: | flutter gen-l10n dart run build_runner build --delete-conflicting-outputs flutter build apk --release flutter build apk --split-per-abi shell: bash - name: Build for iOS if: matrix.platform == 'ios' run: | sudo xcode-select -switch /Applications/Xcode_16.2.app/Contents/Developer flutter gen-l10n dart run build_runner build --delete-conflicting-outputs flutter build ios --release --no-codesign cd build/ios/Release-iphoneos mkdir -p Payload 
cp -r Runner.app Payload/ zip -r Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-unsigned.zip Payload cp Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-unsigned.zip Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-unsigned.ipa mkdir -p ../../ios cp Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-unsigned.ipa ../../ios/ cp Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-unsigned.zip ../../ios/ shell: bash - name: Install winget if: matrix.platform == 'windows' uses: Cyberboss/install-winget@v1 - name: Build for Windows if: matrix.platform == 'windows' shell: pwsh run: ./scripts/build_windows.ps1 - name: Upload unsigned Windows zip if: matrix.platform == 'windows' uses: actions/upload-artifact@v4 id: upload-unsigned-zip with: name: windows-unsigned-zip path: build/windows/unsigned/app.zip if-no-files-found: error - name: Sign Windows zip if: matrix.platform == 'windows' uses: signpath/github-action-submit-signing-request@v1.1 with: api-token: ${{ secrets.SIGNPATH_API_TOKEN }} organization-id: '254a26d6-6c3a-4a55-9ca6-890d0d34deb1' project-slug: 'anx-reader' signing-policy-slug: 'release-signing' artifact-configuration-slug: 'initial_zip' github-artifact-id: ${{ steps.upload-unsigned-zip.outputs.artifact-id }} wait-for-completion: true output-artifact-directory: 'build/windows' - name: Build unsigned Windows exe if: matrix.platform == 'windows' shell: pwsh run: ./scripts/build_windows_exe.ps1 - name: Upload unsigned Windows exe if: matrix.platform == 'windows' uses: actions/upload-artifact@v4 id: upload-unsigned-exe with: name: windows-unsigned-exe path: build/windows/unsigned/app.exe if-no-files-found: error - name: Sign Windows exe if: matrix.platform == 'windows' uses: signpath/github-action-submit-signing-request@v1.1 with: api-token: ${{ secrets.SIGNPATH_API_TOKEN }} organization-id: 254a26d6-6c3a-4a55-9ca6-890d0d34deb1 project-slug: 'anx-reader' signing-policy-slug: 'release-signing' artifact-configuration-slug: 'initial_exe' github-artifact-id: ${{ steps.upload-unsigned-exe.outputs.artifact-id }} wait-for-completion: true output-artifact-directory: 'build/windows' - name: Build for macOS if: matrix.platform == 'macos' shell: bash run: | sudo xcode-select -switch /Applications/Xcode_16.2.app/Contents/Developer flutter gen-l10n dart run build_runner build --delete-conflicting-outputs chmod +x ./scripts/macos_nosign.sh ./scripts/macos_nosign.sh flutter build macos cd build/macos/Build/Products/Release mkdir -p "Anx Reader" cp -r "Anx Reader.app" "Anx Reader/AnxReader.app" ln -s /Applications "Anx Reader/Applications" hdiutil create -volname "Anx Reader" -srcfolder "Anx Reader" -ov -format UDZO Anx-Reader-${{ env.VERSION }}.dmg ditto -c -k --keepParent "Anx Reader.app" "Anx-Reader-${{ env.VERSION }}.zip" - name: Rename builds run: | cd build if [ "${{ matrix.platform }}" == "android" ]; then cd app/outputs/flutter-apk mv app-armeabi-v7a-release.apk Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-armeabi-v7a.apk mv app-arm64-v8a-release.apk Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-arm64-v8a.apk mv app-x86_64-release.apk Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-x86_64.apk mv app-release.apk Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}-universal.apk elif [ "${{ matrix.platform }}" == "windows" ]; then mv windows/app.zip windows/Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}.zip mv windows/app.exe windows/Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}.exe elif [ "${{ matrix.platform }}" == "macos" ]; then mkdir -p macos 
mv macos/Build/Products/Release/Anx-Reader-${{ env.VERSION }}.dmg macos/Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}.dmg mv macos/Build/Products/Release/Anx-Reader-${{ env.VERSION }}.zip macos/Anx-Reader-${{ matrix.platform }}-${{ env.VERSION }}.zip fi shell: bash - name: Extract release notes id: extract_release_notes run: | VERSION=${{ env.VERSION }} if [[ $VERSION == beta-* ]] || [[ $VERSION == alpha-* ]]; then MAIN_VERSION=$(echo $VERSION | cut -d'-' -f2 | cut -d'-' -f1) else MAIN_VERSION=$VERSION fi CHANGELOG_CONTENT=$(sed -n "/## $MAIN_VERSION/,/## /p" CHANGELOG.md | sed '$d') echo "RELEASE_NOTES<<EOF" >> $GITHUB_ENV echo "$CHANGELOG_CONTENT" >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV shell: bash - name: Upload artifacts if: env.IS_ALPHA == 'true' uses: actions/upload-artifact@v4 with: name: ${{ matrix.platform }}-artifacts path: | build/app/outputs/flutter-apk/*.apk build/windows/*.zip build/windows/*.exe build/macos/*.dmg build/macos/*.zip build/ios/*.ipa build/ios/*.zip if-no-files-found: warn - name: Create Release if: env.IS_ALPHA != 'true' uses: softprops/action-gh-release@v2 with: files: | build/app/outputs/flutter-apk/*.apk build/windows/*.zip build/windows/*.exe build/macos/*.dmg build/macos/*.zip build/ios/*.ipa build/ios/*.zip body: ${{ env.RELEASE_NOTES }} generate_release_notes: false prerelease: ${{ env.IS_BETA == 'true' }} ``` ## /.gitignore ```gitignore path="/.gitignore" # Miscellaneous *.class *.log *.pyc *.swp .DS_Store .atom/ .build/ .buildlog/ .history .svn/ .swiftpm/ migrate_working_dir/ # IntelliJ related *.iml *.ipr *.iws .idea/ # The .vscode folder contains launch configuration and tasks you configure in # VS Code which you may wish to be included in version control, so this line # is commented out by default. #.vscode/ # Flutter/Dart/Pub related **/doc/api/ **/ios/Flutter/.last_build_id .dart_tool/ .flutter-plugins .flutter-plugins-dependencies .pub-cache/ .pub/ /build/ # Symbolication related app.*.symbols # Obfuscation related app.*.map.json # Android Studio will place build artifacts here /android/app/debug /android/app/profile /android/app/release # riverpod generated files lib/gen/ release.sh *.g.dart *.freezed.dart /.vscode/ ``` ## /.metadata ```metadata path="/.metadata" # This file tracks properties of this Flutter project. # Used by Flutter tool to assess capabilities and perform upgrades etc. # # This file should be version controlled and should not be manually edited. version: revision: "761747bfc538b5af34aa0d3fac380f1bc331ec49" channel: "stable" project_type: app # Tracks metadata for the flutter migrate command migration: platforms: - platform: root create_revision: 761747bfc538b5af34aa0d3fac380f1bc331ec49 base_revision: 761747bfc538b5af34aa0d3fac380f1bc331ec49 - platform: android create_revision: 761747bfc538b5af34aa0d3fac380f1bc331ec49 base_revision: 761747bfc538b5af34aa0d3fac380f1bc331ec49 # User provided section # List of Local paths (relative to this file) that should be # ignored by the migrate tool. # # Files that are not part of the templates will be ignored by default.
unmanaged_files: - 'lib/main.dart' - 'ios/Runner.xcodeproj/project.pbxproj' ``` ## /CHANGELOG.md # Changelog ## Todo ## 1.4.5 - Fix: Incorrect click position detection on macOS - Fix: Sort menu sometimes fails to open - 修复:macOS 端无法正确判断点击位置的问题 - 修复:排序菜单有时无法打开的问题 ## 1.4.4 - Feat: Import pdf files - Feat: Sort books - Feat: More available fonts - Feat: Delete reading records of a book - Feat: Add webdav sync direction dialog - Feat: Add font delete - Fix: Webdav sync aborted dialog content - Fix: if webdav is empty, sync will upload - Fix: avoid image following paragraph indent - Fix: optimize book loading speed - Fix: sync custom book cover - 新增:导入 pdf 文件 - 新增:书架排序功能 - 新增:更多可选字体 - 新增:删除一本书的阅读记录 - 新增:添加 WebDAV 同步方向对话框 - 新增:添加字体删除功能 - 修复:WebDAV 同步中止对话框内容 - 修复:如果 WebDAV 为空,则同步时默认上传 - 修复:避免图片跟随段落缩进 - 修复:提升图书加载速度 - 修复:同步自定义的书籍封面 ## 1.4.3 - Feat: Storage space management - Feat: Add auto translate selection switch in translate settings(#217) - Feat: Handle txt files with failed chapter division by word count - Feat: Import txt file with utf-16 or utf-32 encoding - Feat: recover system TTS(#197) - Fix: TTS cannot play after resume from background(#196) - Fix: TTS cannot play when encountering images or cross-chapter - Fix: System TTS continuous backward movement(#197) - Fix: Copy translated text instead of original text(#190) - Fix: Cross-segment highlight cannot be displayed immediately - Fix: Highlight only the first word of the selection on Android(#189) - Fix: Scroll page turn cannot be used in scroll mode(#201) - 新增:存储空间查看和管理 - 新增:翻译设置页增加自动翻译开关(#217) - 新增:按字数对分章失败的txt文件进行处理 - 新增:支持导入UTF-16、UTF-32编码的txt文件 - 新增:重新引入了系统 TTS(#197) - 修复:TTS 无法在从后台恢复后播放(#196) - 修复:集成 TTS 遇到图片或跨章节时无法播放 - 修复:系统 TTS 连续向后移动 - 修复:复制翻译内容而不是原文(#190) - 修复:跨段划线无法立即显示 - 修复:安卓设备有时划线只能显示第一个字词(#189) - 修复:滚动翻页模式下,鼠标滚轮翻页一次翻一整页的问题(#201) ## 1.4.2 - Feat: add link icon - Feat: AI chat regenerate and copy - Feat: TTS integration - Feat: Reading info custom - Feat: Navigation bar custom - Feat: Sync completed toast - Fix: Some old versions of Webview cannot import books - Fix: Footnotes cannot be displayed on some devices - Fix: Image as link cannot be clicked - Fix: Reading information does not follow style changes - Fix: First line indent affects images - Fix: Context menu position error when it exceeds the screen - Fix: Optimize book opening speed - Fix: some device cant open footnote - Fix: Android click page turn button may return to previous page - Fix: iOS blank screen after resume from background - Fix: note input box may be covered by keyboard(#183) - Fix: txt file with special characters cannot be imported - Fix: some Android devices cannot export backup file - 新增:添加更多社区链接 - 新增:AI 对话可重新生成和复制 - 新增:集成的 TTS - 新增:阅读信息自定义 - 新增:导航栏自定义 - 新增:同步完成是否显示提示 - 修复:部分旧版本Webview无法导入图书 - 修复:部分设备无法显示脚注 - 修复:以图片作为脚注时无法点击 - 修复:阅读信息无法跟随样式变化 - 修复:首行缩进影响图片 - 修复:上下文菜单超出屏幕时位置错误 - 修复: 优化书籍打开速度 - 修复: 部分设备无法打开脚注 - 修复:Android 跨章节后无法点击翻页的问题 - 修复:iOS 设备从后台恢复后有时白屏的问题 - 修复:写想法的输入框有时被键盘遮挡(#183) - 修复:部分含有特殊字符的 txt 文件无法导入的问题 - 修复:部分 Android 设备无法导出备份文件 ## 1.4.1 - Feat: excerpt AI chat - Feat: add AI chat in reading page - Feat: control webdav sync only when wifi is connected - Feat: manage open book animation - Feat: add text for context menu - Feat: add text for slider(#48) - Feat: add tips for AI configuration - Feat: custom shelf cover width - Feat: toc item scroll to current chapter(#141) - Fix: save image on iOS - Fix: click page turn button may return to previous page - Fix: scroll page turn cannot set margin(#139) - 新增:划线 AI 对话 - 新增:阅读界面可以与 AI 对话 - 新增:控制 
WebDAV 是否仅在 WiFi 下同步 - 新增:管理打开书的动画 - 新增:上下文菜单文字提示 - 新增:样式调节滑块的文字说明(#48) - 新增:AI 配置提示 - 新增:自定义书架封面宽度 - 新增:目录项滚动到当前章节(#141) - 修复:iOS 端保存图片 - 修复:有时点击翻页会返回上一页 - 修复:滚动翻页无法设置上下边距(#139) ## 1.3.1 > MacOs 版本处于测试阶段 > MacOS version in beta - Fix: Some Android devices cannot import txt format books - 修复:部分安卓设备无法导入 txt 格式的书籍 ## 1.3.0 > MacOs 版本处于测试阶段 > MacOS version in beta - Feat: Add font weight slider - Fix: AI answer cache(#124) - Feat: Expand the range of custom font size - Feat: Add volume key page turn switch - Feat: Add custom Gemini api url - Fix: Android TTS slider value not updating - Fix: Txt file chapter title detection(#107) - Fix: DeepSeek default model name(#123) - Fix: Sync problem(#94,#89) - 新增:调整字体粗细 - 新增:AI 回答缓存 - 新增:扩大自定义字体大小范围 - 新增:音量键翻页开关 - 新增:自定义 Gemini api url - 修复:Android TTS 滑块数值不更新 - 修复:txt 文件章节标题检测(#107) - 修复:DeepSeek 默认模型名称(#123) - 修复:无法同步的问题(#94,#89) ## 1.2.6 - Fix: Fix ai stream error - 修复:修复 AI 流错误 ## 1.2.5 - Feat: Add volume key page turn(#95) - Feat: Add auto background color(#78) - Feat: Add OpenAI、Claude、DeepSeek AI models(#100) - Perf: Optimize txt file import speed - UI: Optimize multiple UI interfaces - 新增:音量键翻页(#95) - 功能:自动背景色(#78) - 功能:接入 OpenAI、Claude、DeepSeek 等多个 AI 模型 - 性能:大幅提高了 txt 文件的导入速度 - UI: 优化多个 UI 界面 ## 1.2.4 2025-01-21 * Feat: Remember last window position and size(#67) * Feat: Color picker input hex code(#69) * Feat: Export notes in CSV format(#71) * Feat: Add TTS stop timer(#81) * Feat: Add heat map to show reading time(#69) * Feat: Import progress prompt(#61) * Feat: Add statistics chart switch time * Fix: some Windows systems cannot import books(#75) * Fix: enhance Webdav sync stability * Fix: Reading settings interface is incomplete on some devices(#70) * 新增:记忆上次窗口位置和大小(#67) * 新增:选择颜色时能够输入十六进制代码(#69) * 新增:以 CSV 格式导出笔记(#71) * 新增:TTS 定时停止(#81) * 新增:用热力图展示阅读时长(#69) * 新增:导入进度提示(#61) * 新增:统计图表切换时间 * 修复:部分 Windows 系统下无法导入图书(#75) * 修复:增强 Webdav 同步稳定性 * 修复:部分设备下阅读设置界面显示不完整(#70) ## 1.2.3 2024-12-26 * Feat: Reader could add notes * Feat: Search books * Feat(Android): Display TTS control buttons in the notification screen * Feat(Android): Import books through system sharing * Feat(Windows): Drag to import books * Feat(Windows): Webview2 check and prompt * Fix: Fixed garbled text when importing txt files * Fix: Optimized import efficiency * Fix(Windows):Fixed crash issue when opening books on some Windows devices * 新增:读者添加批注 * 新增:书籍搜索 * 新增(Android):在通知栏中显示 TTS 控制按钮 * 新增(Android):通过系统分享导入书籍 * 新增(Windows):拖拽导入书籍 * 新增(Windows):Webview2 检查和提示 * 修复:txt 文件导入时乱码问题(添加了 GBK 解码) * 修复:大幅优化导入效率 * 修复(Windows):部分Windows 端打开书时闪退问题 ## 1.2.2 2024-12-02 🚀 Support txt files now! 🚀 支持了 txt 文件导入 - Feat: Setting reading column count - Feat: Import txt format books - Fix: Book progress record is not timely - Fix: Windows import book error - 新增:设置阅读栏数 - 新增:导入 txt 格式书籍 - 修复:书籍进度记录不及时 - 修复:Windows 端部分设备无法导入书籍 ## 1.2.1 2024-11-23 - Feat: Drag to group books - Fix: Bottom navigation bar covers menu bar - Fix: Windows no longer deletes original files when importing - Fix: Books with single quotes cannot be opened - 新增:拖拽实现书籍分组 - 修复:底部导航栏覆盖菜单栏 - 修复: Windows 端导入时删除原文件的问题 - 修复: 包含单引号的书籍无法打开 ## 1.2.0 2024-11-17 ❗Anx-Reader has changed the Android version signature, please back up and reinstall Anx-Reader❗ ❗安读更换了 Android 版本的签名, 请做好备份重新安装安读❗ 🚀You can now use Anx-Reader on Windows! 🚀现在可以在 Windows 上使用安读了! 
- Feat: Translate selected content - Feat: Note add time - Feat: Webview version check - Feat: convert chinese mode - UI: Optimized the statistic card - Fix: Context menu cannot be closed once - Fix: Cannot correctly judge the version when checking for updates - 新增:翻译选中内容 - 新增:简繁转换 - 新增:Webview版本检查 - 新增:显示笔记添加时间 - UI:优化了统计卡片 - 修复:上下文菜单不能一次关闭 - 修复: 检查更新时不能正确判断版本 ## 1.1.8 2024-10-23 - Added: Modify import/export file structure - Fixed: Book font size cannot maintain relative relationship - Fixed: Can be used in lower webview versions (about 92.0.0.0 and above) - 修改:修改了导入导出的文件结构 - 修复:书籍字体大小不能保持相对关系 - 修复:能够在较低的 webview 版本中使用(约92.0.0.0及以上) Windows version is coming soon! Windows端即将发布,敬请期待! ## 1.1.7 2024-09-11 - Backup: Export/import all data - Ability to click and view large images - Convenient back arrow after navigation - Multiple pop-up annotations within a pop-up annotation - Customizable text indentation size - Text selection within pop-up annotations - Optimization of status bar and navigation key areas to avoid obstruction by navigation keys - Fixed white screen issue when opening files - Fixed issue with importing font files with Chinese filenames - Shortened TTS reading intervals, especially when using TTS-Server - 备份:导出/导入全部数据 - 能够点击查看大图了 - 跳转后能够有方便地返回箭头 - 弹注中多次弹注 - 弹注字体跟随设置 - 自定义文本缩进大小 - 弹注中选择文字 - 状态栏和导航键区域优化,避免了被导航键遮盖 - 修复打开文件白屏 - 修复字体文件中中文文件名无法导入 - 缩短了TTS朗读间隔,尤其是使用TTS-Server时 - 根据弹注内容调整弹注框大小 ## 1.1.6 2024-09-03 This release includes a number of new features and improvements, as well as bug fixes. Feature: Added support for importing books in mobi, azw3, and fb2 formats Feature: Added TTS (Text-to-Speech) voice reading functionality Feature: Added filter, sort, and open book at the note location features in the note list Feature: Added more page-turning methods Feature: Added support for importing custom fonts Feature: Added full-text search functionality Fix: Resolved issues where book styles were not applied (#24, #28) Other: For more new features and bug fixes 众多新增功能! 功能:新增mobi、azw3、fb2格式书籍导入 功能:新增TTS语音朗读 功能:笔记列表可筛选、排序、打开书到笔记的位置 功能:新增更多翻页方式 功能:导入自定义字体 功能:全文搜索 修复:书籍样式不生效 #24,#28 以及其他众多新功能和修复 ## /CONTRIBUTING.md [English](#Contributing-to-Anx-Reader) | [简体中文](#让安读更好) # Contributing to Anx Reader Anx Reader is an open-source project, and we welcome any contributions from you. You can help by translating, fixing bugs, adding new features, writing documentation, and more. If you want to contribute, the following guide may be helpful. Let's get started! ### Running - Install [Flutter](https://flutter.dev). - Clone and navigate to the project directory. Execute the following commands: ```bash flutter pub get flutter gen-l10n dart run build_runner watch flutter run # or click the run button in your IDE ``` ### Contributing to Development 1. Fork this repository 2. Create your feature branch (`git checkout -b feature/AmazingFeature`) 3. Make changes (e.g., adding new features, fixing bugs, translating, etc.) 4. Commit your changes (`git commit -m 'Add some AmazingFeature'`) 5. Push to the branch (`git push origin feature/AmazingFeature`) 6. Submit a Pull Request ### Building You will need a signing key to generate an APK. You can generate one or use the debug signing option If you want to generate a signing key, please refer to [here](https://developer.android.com/studio/publish/app-signing). Using the debug signing option, you can modify the following in the `android/app/build.gradle` file: ```gradle android { // ... 
buildTypes { release { signingConfig signingConfigs.debug // using debug signing } } // ... } ``` ### Translation If you'd like Anx Reader to support your language, let's work together on the translation! You can translate project documentation or the app interface. **Translate Documentation** Currently, we have the following documents that need translation: - [README.md](README.md) Please copy README.md as README_language_code.md, translate it, and place the translated file in the project root directory. Then, add a link to the translation at the top of the README.md. **Translate the App Interface** - Anx Reader uses [intl](https://pub.dev/packages/intl) for multilingual support. You can find the localization files in the `lib/l10n` directory. Please copy `app_en.arb` to `app_language_code.arb`, and then translate it. - You can translate missing fields or modify existing translations. - Place the translated file in the `lib/l10n` directory and run `flutter gen-l10n` to generate the localization files. - Add your `language name` and `code` to the [Settings Page](lib/page/settings_page/appearance.dart#L83). - Refer to [locale](https://saimana.com/list-of-country-locale-code/) for language codes. - Make sure to run the app at least once after translation to ensure everything works fine. - Submit a Pull Request. ### Fixing Bugs and Adding New Features Anx Reader uses [Flutter_inappwebview](https://pub.dev/packages/flutter_inappwebview) to render eBooks, so the project relies on `JavaScript` to handle eBook rendering. The `JavaScript` code can be found in the `assets/foliate-js` directory, where you'll find the code responsible for rendering eBooks. The app loads the `index.html` file from the `assets/foliate-js` directory using a [built-in server](lib/service/book_player/book_player_server.dart). When debugging the `js`, you can find some commented code at the top of `book.js` in the `assets/foliate-js` directory. You can uncomment this code, place a test book in the `assets/local` directory, modify `book.js` with `let url = '../local/***.epub'`, and open the `index.html` file in your browser to see the result. To adjust the rendering of books, you can modify the `getCSS` function in the `book.js` file. These CSS styles will be applied to the book. The main part of the communication between `js` and `dart` is in [epub_player.dart](lib/page/book_player/epub_player.dart), where you'll find the code that handles `js` communication. The webview interface is also loaded here. After re-commenting the code in `book.js`, rerun the application. Please submit a Pull Request after re-commenting the code in `book.js`. # 让安读更好 安读是一款开源项目,我们欢迎您的任何贡献,您可以对项目进行翻译、修复 bug、添加新功能,编写文档等。如果您想要贡献,以下内容可能会对您有所帮助。 让我们开始吧! ### 运行 - 安装 [Flutter](https://flutter.dev)。 - 克隆并进入项目目录。 执行以下命令: ```bash flutter pub get flutter gen-l10n dart run build_runner watch flutter run # 或点击IDE运行按钮 ``` ### 参与开发 1. Fork 本仓库 2. 创建您的特性分支 (`git checkout -b feature/AmazingFeature`) 3. 在这里做些更改(如添加新功能、修复 bug、翻译等) 4. 提交您的更改 (`git commit -m 'Add some AmazingFeature'`) 5. 推送到分支 (`git push origin feature/AmazingFeature`) 6. 提交一个 Pull Request ### 编译 您将需要签名密钥来生成 APK。您可以生成一个或使用调试签名选项 如果您想要生成一个签名密钥,请参考 [这里](https://developer.android.com/studio/publish/app-signing)。 使用调试签名选项,您可以在 `android/app/build.gradle` 文件中修改这些内容: ```gradle android { // ... buildTypes { release { signingConfig signingConfigs.debug // using debug signing } } // ... } ``` ### 翻译 想要让安读支持您的语言,让我们一起来翻译吧! 
您可以翻译项目文档,也可以翻译应用程序的界面。 **翻译文档** 目前,我们有以下文档需要翻译: - [README.md](README.md) 请复制 README.md 为 README_语言代码.md,然后进行翻译,翻译后的文件请放在项目根目录下,然后在 README.md 头部添加链接。 **翻译应用程序界面** - 安读使用 [intl](https://pub.dev/packages/intl) 进行多语言支持,您可以在`lib/l10n`目录下找到多语言文件,请复制`app_en.arb`为`app_语言代码.arb`,然后进行翻译。 - 您可以翻译缺失的字段,或者对现有翻译进行修改。 - 翻译后的文件请放在`lib/l10n`目录下,然后运行`flutter gen-l10n`生成多语言文件。 - 在[设置界面](lib/page/settings_page/appearance.dart#L83)添加您的`语言名称`和`代码`。 - 关于语言代码,请参考 [locale](https://saimana.com/list-of-country-locale-code/) - 请确保翻译后您至少运行一次应用程序,以确保翻译没有问题。 - 提交一个 Pull Request。 ### 修复 bug 和添加新功能 安读使用 [Flutter_inappwebview](https://pub.dev/packages/flutter_inappwebview) 来渲染电子书,因此项目在渲染电子书的部分使用`JavaScript`编写。 `JavaScript`代码位于`assets/foliate-js`目录下,您可以在这里找到渲染电子书的代码。软件通过 [内置服务器](lib/service/book_player/book_player_server.dart) 加载`assets/foliate-js`目录下的`index.html`文件。 在调试`js`时,您可以在`assets/foliate-js`目录下的`book.js`头部看到一些被注释的代码,您可以取消注释这些代码,然后将测试书籍放入`assets/local`目录下,修改`book.js`中的`let url = '../local/***.epub'`然后在浏览器中打开`index.html`文件,即可看到效果。 如果想要调整书籍渲染效果可以修改`book.js`文件中的`getCSS`函数,这些css将会被应用到书籍中。 `js`与`dart`之间的通信的主要部分在[epub_player.dart](lib/page/book_player/epub_player.dart)中,您可以在这里找到与`js`通信的代码,wenview 界面也是在这里加载的。 在重新注释`book.js`中的代码后,重新运行应用程序即可。 请在重新注释`book.js`中的代码后提交 Pull Request。 ## /LICENSE ``` path="/LICENSE" MIT License Copyright (c) 2025 Anxcye Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ``` ## /README.md **English** | [简体中文](README_zh.md) | [Türkçe](README_tr.md)

# Anx Reader

*(Header images and badges from the original HTML: project logo "Anx-logo", Trendshift feature badge, supported platforms and formats, latest release, pre-release, license, downloads, Featured on HelloGitHub, and GitHub stars.)*

Anx Reader, a thoughtfully crafted e-book reader for book lovers. Featuring powerful AI capabilities and supporting various e-book formats, it makes reading smarter and more focused. With its modern interface design, we're committed to delivering pure reading pleasure.

![](./docs/images/main.jpg)

- **Cross-Platform** iOS/macOS/Windows/Android
- **Full Sync** Reading Progress/Book Files/Highlighted Notes
- **Multiple AI** OpenAI/Claude/Gemini/DeepSeek
- **Insightful Data** Detailed and Intuitive Charts - Weekly/Monthly/Yearly/Heatmap
- **Notes** Multiple Ways to Filter/Sort - Export as TXT, Markdown, CSV
- **Highly Customizable Styles** Line Spacing/Paragraph Spacing/Margins/Fonts/Color Schemes/Page Turning Methods
- **Practical Features** Translation/Search/Quick Ask AI/Write Ideas/Text-to-Speech
| OS | Source |
| --- | --- |
| iOS | App Store |
| macOS | Mac App Store, GitHub |
| Windows | GitHub |
| Android | GitHub |
📚 **Rich Format Support** - Support for major e-book formats: EPUB, MOBI, AZW3, FB2, TXT - Perfect parsing for optimal reading experience ☁️ **Seamless Sync** - Cross-device synchronization of reading progress, notes, and books via WebDAV - Continue your reading journey anywhere, anytime 🤖 **Smart AI Assistant** - Integration with leading AI services: OpenAI, DeepSeek, Claude, Gemini - Intelligent content summarization and reading position recall for enhanced efficiency 🎨 **Personalized Reading Experience** - Carefully designed theme colors with customization options - Switch freely between scrolling and pagination modes - Import custom fonts to create your personal reading space 📊 **Professional Reading Analytics** - Comprehensive reading statistics - Weekly, monthly, and yearly reading reports - Intuitive reading heatmap to track every moment of your reading journey 📝 **Powerful Note System** - Flexible text annotation features - Export options in TXT, CSV, and Markdown formats - Easily organize and share your reading insights 🛠️ **Practical Tools** - Smart TTS reading to rest your eyes - Full-text search for quick content location - Instant word translation to enhance reading efficiency 💻 **Cross-Platform Support** - Seamless experience on Android / Windows / MacOS / iOS - Consistent user interface across devices ### TODO - [X] UI adaptation for tablets - [X] Page-turning animation - [X] TTS voice reading - [X] Reading fonts - [X] Translation - [ ] Full-text translation - [ ] Support for more file types (pdf) - [X] Support for WebDAV synchronization - [ ] Support for Linux ### I Encountered a Problem, What Should I Do? Check [Troubleshooting](./docs/troubleshooting.md#English) Submit an [issue](https://github.com/Anxcye/anx-reader/issues/new/choose), and we will respond as soon as possible. Telegram Group: [https://t.me/AnxReader](https://t.me/AnxReader) QQ Group:1042905699 ### Screenshots | ![](./docs/images/wide_main.png) | ![](./docs/images/wide_ai.png) | | :------------------------------: | :----------------------------: | | ![](./docs/images/wide1.png) | ![](./docs/images/wide2.png) | | ![](./docs/images/wide3.png) | ![](./docs/images/wide4.png) | | ![](./docs/images/wide5.png) | ![](./docs/images/wide6.png) | | ![](./docs/images/mobile1.png) | ![](./docs/images/mobile2.png) | ![](./docs/images/mobile3.png) | | :----------------------------: | :----------------------------: | :----------------------------: | | ![](./docs/images/mobile4.png) | ![](./docs/images/mobile5.png) | ![](./docs/images/mobile6.png) | ## Donations If you like Anx Reader, please consider supporting the project by donating. Your donation will help me maintain and improve the project. ❤️ [Donate](https://anxcye.com/home/7) ## Building Want to build Anx Reader from source? Please follow these steps: - Install [Flutter](https://flutter.dev). - Clone and enter the project directory. - Run `flutter pub get`. - Run `flutter gen-l10n` to generate multi-language files. - Run `dart run build_runner build --delete-conflicting-outputs` to generate the Riverpod code. - Run `flutter run` to launch the application. You may encounter Flutter version incompatibility issues. Please refer to the [Flutter documentation](https://flutter.dev/docs/get-started/install). 
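Put together, a from-source session might look like the following; this is only a sketch that strings together the commands listed above, assuming the Flutter SDK and your platform toolchain are already installed (the release target shown in the comment, `apk`, is an example and depends on your platform):

```bash
# Fetch dependencies, generate localization and Riverpod code, then run.
flutter pub get
flutter gen-l10n
dart run build_runner build --delete-conflicting-outputs
flutter run                 # debug run on a connected device or desktop
# flutter build apk         # example release build; adjust per platform
```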
## Code signing policy

- Committers and reviewers: [Members team](https://github.com/anxcye/anx-reader/graphs/contributors)
- Approvers: [Owners](https://github.com/anxcye)
- [Privacy Policy](https://anx.anxcye.com/privacy.html)
- [Terms of Service](https://anx.anxcye.com/terms.html)

### Sponsors

| ![signpath](https://signpath.org/assets/favicon-50x50.png) | Free code signing on Windows provided by [SignPath.io](https://about.signpath.io/), certificate by [SignPath Foundation](https://signpath.org/) |
|---|---|

## License

This project is licensed under the [MIT License](./LICENSE).

Starting from version 1.1.4, the open source license for the Anx Reader project was changed from the MIT License to the GNU General Public License version 3 (GPLv3). After version 1.2.6, the selection and highlight feature was rewritten, and the open source license was changed back from the GPL-3.0 License to the MIT License. All contributors agreed to this change (#116).

## Thanks

[foliate-js](https://github.com/johnfactotum/foliate-js), which is MIT licensed, is used as the ebook renderer. Thanks to the author for providing such a great project.

[foliate](https://github.com/johnfactotum/foliate), which is GPL-3.0 licensed, inspired the selection and highlight feature. Since 1.2.6, however, that feature has been rewritten.

And many [other open source projects](./pubspec.yaml); thanks to all the authors for their contributions.

## /README_tr.md

# Anx Reader

**[English](README.md)** | **Türkçe** | [简体中文](README_zh.md)

> [!WARNING]
> Some features or descriptions may be outdated. Please check the [English README](README.md) for the latest information.
> Feel free to help us update this translation by submitting a PR!

*(Header images and badges: project logo "Anx-logo", title "Anx Reader", Lisans, İndirmeler, Öne Çıkan|HelloGitHub, Yıldızlar.)*

Anx Reader, çevrimiçi tanıtım içeriği olmadan tamamen okuma odaklı bir uygulamadır. Daha iyi odaklanmanıza yardımcı olarak okuma verimliliğinizi artırır. **Desteklenen formatlar:** **epub / mobi / azw3 / fb2 / txt** Android ve Windows için kullanılabilir. ![](./docs/images/9.jpg) - Daha kapsamlı senkronizasyon özellikleri. WebDAV kullanarak okuma ilerlemesini, notları ve kitap dosyalarını senkronize edebilirsiniz. - Konforlu bir okuma deneyimi için zengin ve özelleştirilebilir renk şemaları. - Okuma alışkanlıklarınızı takip etmenizi sağlayan güçlü okuma istatistikleri. - Derinlemesine okuma için gelişmiş not alma özellikleri. - Telefon ve tabletler için optimize edilmiş arayüz. ## TODO (Yapılacaklar) - [X] Tabletler için UI uyarlaması - [X] Sayfa çevirme animasyonu - [X] TTS (Metinden sese) okuma - [X] Okuma yazı tipleri - [X] Çeviri - [ ] Tam metin çeviri desteği - [ ] Daha fazla dosya türü desteği (PDF) - [X] WebDAV senkronizasyon desteği - [ ] Linux ve MacOS desteği ## Bir Sorunla Karşılaştım, Ne Yapmalıyım? - [Sorun Giderme](./docs/troubleshooting.md#English) bölümüne göz atın. - Bir [sorun bildirin](https://github.com/Anxcye/anx-reader/issues/new/choose), en kısa sürede yanıtlayacağız. **Telegram Grubu:** [https://t.me/AnxReader](https://t.me/AnxReader) ## Ekran Görüntüleri | ![](./docs/images/wide_main.png) | ![](./docs/images/wide_ai.png) | | :------------------------------: | :----------------------------: | | ![](./docs/images/wide1.png) | ![](./docs/images/wide2.png) | | ![](./docs/images/wide3.png) | ![](./docs/images/wide4.png) | | ![](./docs/images/wide5.png) | ![](./docs/images/wide6.png) | | ![](./docs/images/mobile1.png) | ![](./docs/images/mobile2.png) | ![](./docs/images/mobile3.png) | | :----------------------------: | :----------------------------: | :----------------------------: | | ![](./docs/images/mobile4.png) | ![](./docs/images/mobile5.png) | ![](./docs/images/mobile6.png) | ## Bağış Yapın Eğer Anx Reader’ı beğendiyseniz, projeyi desteklemek için bağış yapmayı düşünebilirsiniz. Bağışlarınız, projenin bakımını ve geliştirilmesini sürdürebilmemize yardımcı olur. ❤️ [Bağış Yap](https://anxcye.com/home/7) ## Derleme (Build) İşlemi Anx Reader’ı kaynak kodundan derlemek mi istiyorsunuz? Aşağıdaki adımları takip edin: - [Flutter](https://flutter.dev) yükleyin. - Proje dizinine gidin ve klonlayın. - `flutter pub get` komutunu çalıştırın. - Çok dilli destek dosyalarını oluşturmak için `flutter gen-l10n` çalıştırın. - Riverpod kodunu oluşturmak için `dart run build_runner build --delete-conflicting-outputs` çalıştırın. - Uygulamayı başlatmak için `flutter run` komutunu çalıştırın. Flutter sürüm uyumsuzlukları ile karşılaşabilirsiniz. Lütfen [Flutter belgelerine](https://flutter.dev/docs/get-started/install) göz atın. ## Lisans Anx Reader, [GPL-3.0 Lisansı](./LICENSE) altında lisanslanmıştır. Sürüm 1.1.4'ten itibaren, Anx Reader projesinin açık kaynak lisansı, MIT Lisansından GNU Genel Kamu Lisansı sürüm 3 (GPLv3) olarak değiştirilmiştir. ## Teşekkürler - [foliate-js](https://github.com/johnfactotum/foliate-js) (MIT lisanslı), e-kitap görüntüleyici olarak kullanılmaktadır. Bu harika projeyi sağladığı için yazara teşekkür ederiz. - [foliate](https://github.com/johnfactotum/foliate) (GPL-3.0 lisanslı), seçim ve vurgulama özelliği bu projeden ilham almıştır. - Ve birçok [diğer açık kaynak projeler](./pubspec.yaml), katkıda bulunan tüm yazarlara teşekkürler! ## /README_zh.md [English](README.md) | **简体中文** | [Türkçe](README_tr.md)

# Anx Reader - 让阅读更专注

*(Header images and badges: project logo "Anx-logo", License, Downloads, Featured|HelloGitHub, stars.)*

Anx Reader,一款为热爱阅读的你精心打造的电子书阅读器。集成多种 AI 能力,支持丰富的电子书格式,让阅读更智能、更专注。现代化界面设计,只为提供纯粹的阅读体验。 ![](./docs/images/9_zh.jpg)
| OS | Source |
| --- | --- |
| iOS | App Store |
| macOS | Mac App Store, GitHub |
| Windows | GitHub |
| Android | GitHub |
📚 **丰富的格式支持** - 支持主流电子书格式:EPUB、MOBI、AZW3、FB2、TXT - 完美解析,确保最佳阅读体验 ☁️ **无缝同步体验** - 通过 WebDAV 跨设备同步阅读进度、笔记和书籍 - 随时随地继续您的阅读之旅 🤖 **智能 AI 助手** - 集成多款顶尖 AI 服务:OpenAI、DeepSeek、Claude、Gemini - 智能总结内容、回忆阅读位置,让阅读更有效率 🎨 **个性化阅读体验** - 精心设计的主题配色,支持自定义 - 滚动/分页模式自由切换 - 支持导入自定义字体,打造专属阅读空间 📊 **专业的阅读追踪** - 详尽的阅读数据统计 - 支持周、月、年度阅读报告 - 直观的阅读热力图,记录每一刻阅读时光 📝 **强大的笔记系统** - 灵活的文本批注功能 - 支持导出为 TXT、CSV、Markdown 格式 - 轻松整理和分享您的读书心得 🛠️ **实用工具集** - TTS 智能朗读,解放双眼 - 全文搜索,快速定位内容 - 即时划词翻译,提升阅读效率 💻 **跨平台支持** - 完美支持 Android 与 Windows 系统 - 统一的用户体验,无缝切换设备 ### TODO - [X] UI 适配 Tab 端 - [X] 翻页动画 - [X] TTS 语音朗读 - [X] 阅读字体 - [X] 翻译 - [ ] 全文翻译 - [ ] 支持更多文件类型(pdf) - [X] 支持 WebDAV 同步 - [ ] 支持 Linux, MacOS ### 我遇到了问题,怎么办? 查看[故障排除](./docs/troubleshooting.md#简体中文) 提出一个[issue](https://github.com/Anxcye/anx-reader/issues/new/choose),将会尽快回复。 Telegram 群组:[https://t.me/AnxReader](https://t.me/AnxReader) QQ群:1042905699 ### 截图 | ![](./docs/images/wide_main.png) | ![](./docs/images/wide_ai.png) | | :------------------------------: | :----------------------------: | | ![](./docs/images/wide1.png) | ![](./docs/images/wide2.png) | | ![](./docs/images/wide3.png) | ![](./docs/images/wide4.png) | | ![](./docs/images/wide5.png) | ![](./docs/images/wide6.png) | | ![](./docs/images/mobile1.png) | ![](./docs/images/mobile2.png) | ![](./docs/images/mobile3.png) | | :----------------------------: | :----------------------------: | :----------------------------: | | ![](./docs/images/mobile4.png) | ![](./docs/images/mobile5.png) | ![](./docs/images/mobile6.png) | ## 捐赠 如果你喜欢安读,请考虑捐赠支持项目。您的支持将帮助我优化功能、修复问题,并为您带来更好的阅读体验!感谢您的慷慨支持! ❤️ [捐赠](https://anxcye.com/home/7) ## 构建 希望从源码构建安读?请参考以下步骤: - 安装 [Flutter](https://flutter.dev)。 - 克隆并进入项目目录。 - 运行 `flutter pub get` 。 - 运行 `flutter gen-l10n` 生成多语言文件。 - 运行 `dart run build_runner build --delete-conflicting-outputs` 生成 Riverpod 代码。 - 运行 `flutter run` 启动应用。 您可能遇到 Flutter 版本不兼容的问题,请参考 [Flutter 文档](https://flutter.dev/docs/get-started/install)。 ## /analysis_options.yaml ```yaml path="/analysis_options.yaml" # This file configures the analyzer, which statically analyzes Dart code to # check for errors, warnings, and lints. # # The issues identified by the analyzer are surfaced in the UI of Dart-enabled # IDEs (https://dart.dev/tools#ides-and-editors). The analyzer can also be # invoked from the command line by running `flutter analyze`. # The following line activates a set of recommended lints for Flutter apps, # packages, and plugins designed to encourage good coding practices. include: package:flutter_lints/flutter.yaml linter: # The lint rules applied to this project can be customized in the # section below to disable rules from the `package:flutter_lints/flutter.yaml` # included above or to enable additional rules. A list of all available lints # and their documentation is published at https://dart.dev/lints. # # Instead of disabling a lint rule for the entire project in the # section below, it can also be suppressed for a single line of code # or a specific dart file by using the `// ignore: name_of_lint` and # `// ignore_for_file: name_of_lint` syntax on the line or in the file # producing the lint. 
rules: # avoid_print: false # Uncomment to disable the `avoid_print` rule # prefer_single_quotes: true # Uncomment to enable the `prefer_single_quotes` rule analyzer: plugins: - custom_lint exclude: - "lib/l10n/generated/*" errors: invalid_annotation_target: ignore # Additional information about this file can be found at # https://dart.dev/guides/language/analysis-options ``` ## /android/.gitignore ```gitignore path="/android/.gitignore" gradle-wrapper.jar /.gradle /captures/ /gradlew /gradlew.bat /local.properties GeneratedPluginRegistrant.java # Remember to never publicly share your keystore. # See https://flutter.dev/docs/deployment/android#reference-the-keystore-from-the-app key.properties **/*.keystore **/*.jks # .cxx app/.cxx/ ``` ## /android/app/build.gradle ```gradle path="/android/app/build.gradle" plugins { id "com.android.application" id "kotlin-android" id "dev.flutter.flutter-gradle-plugin" } def localProperties = new Properties() def localPropertiesFile = rootProject.file('local.properties') if (localPropertiesFile.exists()) { localPropertiesFile.withReader('UTF-8') { reader -> localProperties.load(reader) } } def flutterVersionCode = localProperties.getProperty('flutter.versionCode') if (flutterVersionCode == null) { flutterVersionCode = '1' } def flutterVersionName = localProperties.getProperty('flutter.versionName') if (flutterVersionName == null) { flutterVersionName = '1.0' } def keystoreProperties = new Properties() def keystorePropertiesFile = rootProject.file('key.properties') if (keystorePropertiesFile.exists()) { keystoreProperties.load(new FileInputStream(keystorePropertiesFile)) } android { namespace "com.anxcye.anx_reader" // compileSdk flutter.compileSdkVersion compileSdk 35 ndkVersion flutter.ndkVersion compileOptions { sourceCompatibility JavaVersion.VERSION_1_8 targetCompatibility JavaVersion.VERSION_1_8 } kotlinOptions { jvmTarget = '1.8' } sourceSets { main.java.srcDirs += 'src/main/kotlin' } defaultConfig { applicationId "com.anxcye.anx_reader" // You can update the following values to match your application needs. // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration. minSdkVersion 21 targetSdkVersion 34 versionCode flutterVersionCode.toInteger() versionName flutterVersionName } signingConfigs { release { keyAlias keystoreProperties['keyAlias'] keyPassword keystoreProperties['keyPassword'] storeFile keystoreProperties['storeFile'] ? file(keystoreProperties['storeFile']) : null storePassword keystoreProperties['storePassword'] } } buildTypes { release { // signingConfig signingConfigs.debug signingConfig signingConfigs.release } } } flutter { source '../..' 
} dependencies {} ``` ## /android/app/src/debug/AndroidManifest.xml ```xml path="/android/app/src/debug/AndroidManifest.xml" ``` ## /android/app/src/main/AndroidManifest.xml ```xml path="/android/app/src/main/AndroidManifest.xml" ``` ## /android/app/src/main/kotlin/com/anxcye/anx_reader/MainActivity.kt ```kt path="/android/app/src/main/kotlin/com/anxcye/anx_reader/MainActivity.kt" package com.anxcye.anx_reader import com.ryanheise.audioservice.AudioServiceActivity //import io.flutter.embedding.android.FlutterActivity //class MainActivity: FlutterActivity() class MainActivity: AudioServiceActivity() { } ``` ## /android/app/src/main/kotlin/com/example/anx_reader/MainActivity.kt ```kt path="/android/app/src/main/kotlin/com/example/anx_reader/MainActivity.kt" package com.example.anx_reader import io.flutter.embedding.android.FlutterActivity class MainActivity: FlutterActivity() ``` ## /android/app/src/main/res/drawable-v21/launch_background.xml ```xml path="/android/app/src/main/res/drawable-v21/launch_background.xml" ``` ## /android/app/src/main/res/drawable/launch_background.xml ```xml path="/android/app/src/main/res/drawable/launch_background.xml" ``` ## /android/app/src/main/res/mipmap-hdpi/ic_launcher.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-hdpi/ic_launcher.png ## /android/app/src/main/res/mipmap-hdpi/launcher_icon.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-hdpi/launcher_icon.png ## /android/app/src/main/res/mipmap-mdpi/ic_launcher.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-mdpi/ic_launcher.png ## /android/app/src/main/res/mipmap-mdpi/launcher_icon.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-mdpi/launcher_icon.png ## /android/app/src/main/res/mipmap-xhdpi/ic_launcher.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png ## /android/app/src/main/res/mipmap-xhdpi/launcher_icon.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-xhdpi/launcher_icon.png ## /android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png ## /android/app/src/main/res/mipmap-xxhdpi/launcher_icon.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-xxhdpi/launcher_icon.png ## /android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png ## /android/app/src/main/res/mipmap-xxxhdpi/launcher_icon.png Binary file available at https://raw.githubusercontent.com/Anxcye/anx-reader/refs/heads/main/android/app/src/main/res/mipmap-xxxhdpi/launcher_icon.png ## /android/app/src/main/res/values-night/styles.xml ```xml path="/android/app/src/main/res/values-night/styles.xml" ``` ## /android/app/src/main/res/values-zh/strings.xml ```xml path="/android/app/src/main/res/values-zh/strings.xml" 安读 ``` ## /android/app/src/main/res/values/strings.xml ```xml 
path="/android/app/src/main/res/values/strings.xml" Anx Reader ``` ## /android/app/src/main/res/values/styles.xml ```xml path="/android/app/src/main/res/values/styles.xml" ``` ## /android/app/src/main/res/xml/network_security_config.xml ```xml path="/android/app/src/main/res/xml/network_security_config.xml" ``` ## /android/app/src/profile/AndroidManifest.xml ```xml path="/android/app/src/profile/AndroidManifest.xml" ``` ## /android/build.gradle ```gradle path="/android/build.gradle" allprojects { repositories { google() mavenCentral() } } rootProject.buildDir = '../build' subprojects { project.buildDir = "${rootProject.buildDir}/${project.name}" } subprojects { project.evaluationDependsOn(':app') } tasks.register("clean", Delete) { delete rootProject.buildDir } ``` ## /android/gradle.properties ```properties path="/android/gradle.properties" org.gradle.jvmargs=-Xmx4G android.useAndroidX=true android.enableJetifier=true kotlin.jvm.target.validation.mode = IGNORE ``` ## /android/gradle/wrapper/gradle-wrapper.properties ```properties path="/android/gradle/wrapper/gradle-wrapper.properties" #Thu Jul 11 14:22:50 CST 2024 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-bin.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists ``` ## /android/settings.gradle ```gradle path="/android/settings.gradle" pluginManagement { def flutterSdkPath = { def properties = new Properties() file("local.properties").withInputStream { properties.load(it) } def flutterSdkPath = properties.getProperty("flutter.sdk") assert flutterSdkPath != null, "flutter.sdk not set in local.properties" return flutterSdkPath } settings.ext.flutterSdkPath = flutterSdkPath() includeBuild("${settings.ext.flutterSdkPath}/packages/flutter_tools/gradle") repositories { google() mavenCentral() gradlePluginPortal() } } plugins { id "dev.flutter.flutter-plugin-loader" version "1.0.0" id "com.android.application" version "8.1.0" apply false id "org.jetbrains.kotlin.android" version "2.0.0" apply false } include ":app" ``` ## /assets/.gitignore ```gitignore path="/assets/.gitignore" local/ ``` ## /assets/foliate-js/.gitattributes ```gitattributes path="/assets/foliate-js/.gitattributes" vendor/* linguist-vendored=true ``` ## /assets/foliate-js/.gitignore ```gitignore path="/assets/foliate-js/.gitignore" node_modules/ ``` ## /assets/foliate-js/README.md This folder forked from [Foliate-js](https://github.com/johnfactotum/foliate-js) which is MIT licensed. 
## /assets/foliate-js/book.js ```js path="/assets/foliate-js/book.js" // //////// use for test ////////// // const importing = false // const allAnnotations = [ // { id: 1, type: 'highlight', value: "epubcfi(/6/12!/4/2[pgepubid00006]/4,/1:1,/1:7)", color: 'blue', note: 'this is' }, // // { id: 2, type: 'highlight', value: "epubcfi(/6/6!/4/576,/1:2,/1:3)", color: 'yellow', note: 'this is' }, // // { id: 3, type: 'underline', value: "epubcfi(/6/4!/4/4,/1:294,/1:301)", color: 'red', note: 'this is' }, // ] // let url = '../local/shj.epub' // let initialCfi = "epubcfi(/6/12!/4,/2[CHP3],/8/1:29)" // // let initialCfi = null // let style = { // fontSize: 1.2, // fontName: 'customFont0', // fontPath: 'http://localhost:40443/fonts/Fast_Sans.ttf', // fontWeight: 400, // letterSpacing: 0, // spacing: 1.7, // paragraphSpacing: 1, // textIndent: 5, // fontColor: '#0000ff', // backgroundColor: '#ffffff', // topMargin: 100, // bottomMargin: 100, // sideMargin: 5, // justify: true, // hyphenate: true, // // scroll: false, // // animated: true, // pageTurnStyle: 'scroll', // maxColumnCount: 2, // } // window.flutter_inappwebview = {} // window.flutter_inappwebview.callHandler = (name, data) => { // console.log(name, data) // } // setTimeout(() => { // reader.renderAnnotation() // }, 100) // let readingRules = { // // 'none', 's2t', 't2s' // convertChineseMode: 's2t', // bionicReadingMode: true, // } // /////////////////////////////// console.log('book.js') import './view.js' import { FootnoteHandler } from './footnotes.js' import { Overlayer } from './overlayer.js' const { configure, ZipReader, BlobReader, TextWriter, BlobWriter } = await import('./vendor/zip.js') const { EPUB } = await import('./epub.js') var isPdf = false; const getPosition = (target) => { const pointIsInView = (point) => { const { x, y } = point; return x >= 0 && y >= 0 && x <= window.innerWidth && y <= window.innerHeight; }; const frameRect = (framePos, elementRect, scaleX = 1, scaleY = 1) => { return { left: scaleX * elementRect.left + framePos.left, right: scaleX * elementRect.right + framePos.left, top: scaleY * elementRect.top + framePos.top, bottom: scaleY * elementRect.bottom + framePos.top }; }; const rootNode = target.getRootNode?.() ?? target?.endContainer?.getRootNode?.(); const frameElement = rootNode?.defaultView?.frameElement; let scaleX = 1, scaleY = 1; if (frameElement) { const transform = getComputedStyle(frameElement).transform; const matches = transform.match(/matrix\((.+)\)/); if (matches) { [scaleX, , , scaleY] = matches[1].split(/\s*,\s*/).map(Number); } } const frame = frameElement?.getBoundingClientRect() ?? { top: 0, left: 0 }; const rects = Array.from(target.getClientRects()); const firstRect = frameRect(frame, rects[0], scaleX, scaleY); const lastRect = frameRect(frame, rects[rects.length - 1], scaleX, scaleY); const screenWidth = window.innerWidth; const screenHeight = window.innerHeight; const startPoint = { point: { x: ((firstRect.left + firstRect.right) / 2) / screenWidth, y: firstRect.top / screenHeight }, dir: 'up' }; const endPoint = { point: { x: ((lastRect.left + lastRect.right) / 2) / screenWidth, y: lastRect.bottom / screenHeight }, dir: 'down' }; const isStartInView = pointIsInView(startPoint.point); const isEndInView = pointIsInView(endPoint.point); if (!isStartInView && !isEndInView) { return { point: { x: 0, y: 0 } }; } if (!isStartInView) return endPoint; if (!isEndInView) return startPoint; return (startPoint.point.y * screenHeight > screenHeight - endPoint.point.y * screenHeight) ? 
startPoint : endPoint; }; const getSelectionRange = (selection) => { if (!selection?.rangeCount) return null; const range = selection.getRangeAt(0); return range.collapsed ? null : range; }; const handleSelection = (view, doc, index) => { const selection = doc.getSelection(); const range = getSelectionRange(selection); if (!range) return; const position = getPosition(range); const cfi = view.getCFI(index, range); const lang = 'en-US' let text = selection.toString(); if (!text) { const newSelection = range.startContainer.ownerDocument.getSelection(); newSelection.removeAllRanges(); newSelection.addRange(range); text = newSelection.toString(); } onSelectionEnd({ index, range, lang, cfi, pos: position, text }); }; const setSelectionHandler = (view, doc, index) => { // doc.addEventListener('pointerdown', () => isSelecting = true); // if windows or macos or iOS if (navigator.platform.includes('Win') || navigator.platform.includes('Mac') || navigator.platform.includes('iPhone') || navigator.platform.includes('iPad') ) { doc.addEventListener('pointerup', () => handleSelection(view, doc, index)); } else { doc.addEventListener('contextmenu', () => handleSelection(view, doc, index)); } // doc.addEventListener('selectionchange', () => handleSelection(view, doc, index)); if (!view.isFixedLayout) { // go to the next page when selecting to the end of a page // this makes it possible to select across pages doc.addEventListener('selectstart', () => { const container = view.shadowRoot.querySelector('foliate-paginator').shadowRoot.querySelector("#container"); if (!container) return; globalThis.originalScrollLeft = container.scrollLeft; }); doc.addEventListener('selectionchange', () => { if (view.renderer.getAttribute('flow') !== 'paginated') return const { lastLocation } = view if (!lastLocation) return const selRange = getSelectionRange(doc.getSelection()) if (!selRange) return if (globalThis.pageDebounceTimer) { clearTimeout(globalThis.pageDebounceTimer); globalThis.pageDebounceTimer = null; } const container = view.shadowRoot.querySelector('foliate-paginator').shadowRoot.querySelector("#container"); if (selRange.compareBoundaryPoints(Range.END_TO_END, lastLocation.range) >= 0) { globalThis.pageDebounceTimer = setTimeout(async () => { await view.next(); globalThis.originalScrollLeft = container.scrollLeft; globalThis.pageDebounceTimer = null; }, 1000); return } const preventScroll = () => { const selRange = getSelectionRange(doc.getSelection()); if (!selRange || !view.lastLocation || !view.lastLocation.range) return; if (view.lastLocation.range.startContainer === selRange.endContainer) { container.scrollLeft = globalThis.originalScrollLeft; } }; container.addEventListener('scroll', preventScroll); doc.addEventListener('pointerup', () => { container.removeEventListener('scroll', preventScroll); }, { once: true }); }) } } const isZip = async file => { const arr = new Uint8Array(await file.slice(0, 4).arrayBuffer()) return arr[0] === 0x50 && arr[1] === 0x4b && arr[2] === 0x03 && arr[3] === 0x04 } const isPDF = async file => { const arr = new Uint8Array(await file.slice(0, 5).arrayBuffer()) return arr[0] === 0x25 && arr[1] === 0x50 && arr[2] === 0x44 && arr[3] === 0x46 && arr[4] === 0x2d } const makeZipLoader = async file => { configure({ useWebWorkers: false }) const reader = new ZipReader(new BlobReader(file)) const entries = await reader.getEntries() const map = new Map(entries.map(entry => [entry.filename, entry])) const load = f => (name, ...args) => map.has(name) ? 
f(map.get(name), ...args) : null const loadText = load(entry => entry.getData(new TextWriter())) const loadBlob = load((entry, type) => entry.getData(new BlobWriter(type))) const getSize = name => map.get(name)?.uncompressedSize ?? 0 return { entries, loadText, loadBlob, getSize } } const getFileEntries = async entry => entry.isFile ? entry : (await Promise.all(Array.from( await new Promise((resolve, reject) => entry.createReader() .readEntries(entries => resolve(entries), error => reject(error))), getFileEntries))).flat() const makeDirectoryLoader = async entry => { const entries = await getFileEntries(entry) const files = await Promise.all( entries.map(entry => new Promise((resolve, reject) => entry.file(file => resolve([file, entry.fullPath]), error => reject(error))))) const map = new Map(files.map(([file, path]) => [path.replace(entry.fullPath + '/', ''), file])) const decoder = new TextDecoder() const decode = x => x ? decoder.decode(x) : null const getBuffer = name => map.get(name)?.arrayBuffer() ?? null const loadText = async name => decode(await getBuffer(name)) const loadBlob = name => map.get(name) const getSize = name => map.get(name)?.size ?? 0 return { loadText, loadBlob, getSize } } const isCBZ = ({ name, type }) => type === 'application/vnd.comicbook+zip' || name.endsWith('.cbz') const isFB2 = ({ name, type }) => type === 'application/x-fictionbook+xml' || name.endsWith('.fb2') const isFBZ = ({ name, type }) => type === 'application/x-zip-compressed-fb2' || name.endsWith('.fb2.zip') || name.endsWith('.fbz') const getView = async file => { let book if (file.isDirectory) { const loader = await makeDirectoryLoader(file) const { EPUB } = await import('./epub.js') book = await new EPUB(loader).init() } else if (!file.size) throw new Error('File not found') else if (await isZip(file)) { const loader = await makeZipLoader(file) if (isCBZ(file)) { const { makeComicBook } = await import('./comic-book.js') book = makeComicBook(loader, file) } else if (isFBZ(file)) { const { makeFB2 } = await import('./fb2.js') const { entries } = loader const entry = entries.find(entry => entry.filename.endsWith('.fb2')) const blob = await loader.loadBlob((entry ?? entries[0]).filename) book = await makeFB2(blob) } else { book = await new EPUB(loader).init() } } else if (await isPDF(file)) { isPdf = true; const { makePDF } = await import('./pdf.js') book = await makePDF(file) } else { const { isMOBI, MOBI } = await import('./mobi.js') if (await isMOBI(file)) { const fflate = await import('./vendor/fflate.js') book = await new MOBI({ unzlib: fflate.unzlibSync }).open(file) } else if (isFB2(file)) { const { makeFB2 } = await import('./fb2.js') book = await makeFB2(file) } } if (!book) throw new Error('File type not supported') const view = document.createElement('foliate-view') document.body.append(view) await view.open(book) return view } const getCSS = ({ fontSize, fontName, fontPath, fontWeight, letterSpacing, spacing, textIndent, paragraphSpacing, fontColor, backgroundColor, justify, hyphenate }) => { const fontFamily = fontName === 'book' ? '' : fontName === 'system' ? 
'font-family: system-ui !important;' : `font-family: ${fontName} !important;` return ` @namespace epub "http://www.idpf.org/2007/ops"; @font-face { font-family: ${fontName}; src: url('${fontPath}'); font-display: swap; } html { color: ${fontColor} !important; background: none !important; // background-color: ${backgroundColor} !important; background-color: transparent !important; letter-spacing: ${letterSpacing}px; font-size: ${fontSize}em; } body { background: none !important; background-color: transparent; } img { max-width: 100% !important; object-fit: contain !important; break-inside: avoid !important; box-sizing: border-box !important; } a:link { color:rgb(167, 96, 52) !important; } * { line-height: ${spacing}em !important; ${fontFamily} } p, li, blockquote, dd, div, font { color: ${fontColor} !important; // line-height: ${spacing} !important; font-weight: ${fontWeight} !important; padding-bottom: ${paragraphSpacing}em !important; text-align: ${justify ? 'justify' : 'start'}; -webkit-hyphens: ${hyphenate ? 'auto' : 'manual'}; hyphens: ${hyphenate ? 'auto' : 'manual'}; -webkit-hyphenate-limit-before: 3; -webkit-hyphenate-limit-after: 2; -webkit-hyphenate-limit-lines: 2; hanging-punctuation: allow-end last; widows: 2; } p, li, blockquote, dd, font { text-indent: ${textIndent}em !important; } p img { margin-left: -${textIndent}em; } /* prevent the above from overriding the align attribute */ [align="left"] { text-align: left; } [align="right"] { text-align: right; } [align="center"] { text-align: center; } [align="justify"] { text-align: justify; } pre { white-space: pre-wrap !important; } aside[epub|type~="endnote"], aside[epub|type~="footnote"], aside[epub|type~="note"], aside[epub|type~="rearnote"] { display: none; } `} const convertChineseHandler = (mode, doc) => { console.log('convertChinese', mode) const zh_s = '皑蔼碍爱翱袄奥坝罢摆败颁办绊帮绑镑谤剥饱宝报鲍辈贝钡狈备惫绷笔毕毙闭边编贬变辩辫鳖瘪濒滨宾摈饼拨钵铂驳卜补参蚕残惭惨灿苍舱仓沧厕侧册测层诧搀掺蝉馋谗缠铲产阐颤场尝长偿肠厂畅钞车彻尘陈衬撑称惩诚骋痴迟驰耻齿炽冲虫宠畴踌筹绸丑橱厨锄雏础储触处传疮闯创锤纯绰辞词赐聪葱囱从丛凑窜错达带贷担单郸掸胆惮诞弹当挡党荡档捣岛祷导盗灯邓敌涤递缔点垫电淀钓调迭谍叠钉顶锭订东动栋冻斗犊独读赌镀锻断缎兑队对吨顿钝夺鹅额讹恶饿儿尔饵贰发罚阀珐矾钒烦范贩饭访纺飞废费纷坟奋愤粪丰枫锋风疯冯缝讽凤肤辐抚辅赋复负讣妇缚该钙盖干赶秆赣冈刚钢纲岗皋镐搁鸽阁铬个给龚宫巩贡钩沟构购够蛊顾剐关观馆惯贯广规硅归龟闺轨诡柜贵刽辊滚锅国过骇韩汉阂鹤贺横轰鸿红后壶护沪户哗华画划话怀坏欢环还缓换唤痪焕涣黄谎挥辉毁贿秽会烩汇讳诲绘荤浑伙获货祸击机积饥讥鸡绩缉极辑级挤几蓟剂济计记际继纪夹荚颊贾钾价驾歼监坚笺间艰缄茧检碱硷拣捡简俭减荐槛鉴践贱见键舰剑饯渐溅涧浆蒋桨奖讲酱胶浇骄娇搅铰矫侥脚饺缴绞轿较秸阶节茎惊经颈静镜径痉竞净纠厩旧驹举据锯惧剧鹃绢杰洁结诫届紧锦仅谨进晋烬尽劲荆觉决诀绝钧军骏开凯颗壳课垦恳抠库裤夸块侩宽矿旷况亏岿窥馈溃扩阔蜡腊莱来赖蓝栏拦篮阑兰澜谰揽览懒缆烂滥捞劳涝乐镭垒类泪篱离里鲤礼丽厉励砾历沥隶俩联莲连镰怜涟帘敛脸链恋炼练粮凉两辆谅疗辽镣猎临邻鳞凛赁龄铃凌灵岭领馏刘龙聋咙笼垄拢陇楼娄搂篓芦卢颅庐炉掳卤虏鲁赂禄录陆驴吕铝侣屡缕虑滤绿峦挛孪滦乱抡轮伦仑沦纶论萝罗逻锣箩骡骆络妈玛码蚂马骂吗买麦卖迈脉瞒馒蛮满谩猫锚铆贸么霉没镁门闷们锰梦谜弥觅绵缅庙灭悯闽鸣铭谬谋亩钠纳难挠脑恼闹馁腻撵捻酿鸟聂啮镊镍柠狞宁拧泞钮纽脓浓农疟诺欧鸥殴呕沤盘庞国爱赔喷鹏骗飘频贫苹凭评泼颇扑铺朴谱脐齐骑岂启气弃讫牵扦钎铅迁签谦钱钳潜浅谴堑枪呛墙蔷强抢锹桥乔侨翘窍窃钦亲轻氢倾顷请庆琼穷趋区躯驱龋颧权劝却鹊让饶扰绕热韧认纫荣绒软锐闰润洒萨鳃赛伞丧骚扫涩杀纱筛晒闪陕赡缮伤赏烧绍赊摄慑设绅审婶肾渗声绳胜圣师狮湿诗尸时蚀实识驶势释饰视试寿兽枢输书赎属术树竖数帅双谁税顺说硕烁丝饲耸怂颂讼诵擞苏诉肃虽绥岁孙损笋缩琐锁獭挞抬摊贪瘫滩坛谭谈叹汤烫涛绦腾誊锑题体屉条贴铁厅听烃铜统头图涂团颓蜕脱鸵驮驼椭洼袜弯湾顽万网韦违围为潍维苇伟伪纬谓卫温闻纹稳问瓮挝蜗涡窝呜钨乌诬无芜吴坞雾务误锡牺袭习铣戏细虾辖峡侠狭厦锨鲜纤咸贤衔闲显险现献县馅羡宪线厢镶乡详响项萧销晓啸蝎协挟携胁谐写泻谢锌衅兴汹锈绣虚嘘须许绪续轩悬选癣绚学勋询寻驯训讯逊压鸦鸭哑亚讶阉烟盐严颜阎艳厌砚彦谚验鸯杨扬疡阳痒养样瑶摇尧遥窑谣药爷页业叶医铱颐遗仪彝蚁艺亿忆义诣议谊译异绎荫阴银饮樱婴鹰应缨莹萤营荧蝇颖哟拥佣痈踊咏涌优忧邮铀犹游诱舆鱼渔娱与屿语吁御狱誉预驭鸳渊辕园员圆缘远愿约跃钥岳粤悦阅云郧匀陨运蕴酝晕韵杂灾载攒暂赞赃脏凿枣灶责择则泽贼赠扎札轧铡闸诈斋债毡盏斩辗崭栈战绽张涨帐账胀赵蛰辙锗这贞针侦诊镇阵挣睁狰帧郑证织职执纸挚掷帜质钟终种肿众诌轴皱昼骤猪诸诛烛瞩嘱贮铸筑驻专砖转赚桩庄装妆壮状锥赘坠缀谆浊兹资渍踪综总纵邹诅组钻致钟么为只凶准启板里雳余链泄'; const zh_t = 
'皚藹礙愛翺襖奧壩罷擺敗頒辦絆幫綁鎊謗剝飽寶報鮑輩貝鋇狽備憊繃筆畢斃閉邊編貶變辯辮鼈癟瀕濱賓擯餅撥缽鉑駁蔔補參蠶殘慚慘燦蒼艙倉滄廁側冊測層詫攙摻蟬饞讒纏鏟産闡顫場嘗長償腸廠暢鈔車徹塵陳襯撐稱懲誠騁癡遲馳恥齒熾沖蟲寵疇躊籌綢醜櫥廚鋤雛礎儲觸處傳瘡闖創錘純綽辭詞賜聰蔥囪從叢湊竄錯達帶貸擔單鄲撣膽憚誕彈當擋黨蕩檔搗島禱導盜燈鄧敵滌遞締點墊電澱釣調叠諜疊釘頂錠訂東動棟凍鬥犢獨讀賭鍍鍛斷緞兌隊對噸頓鈍奪鵝額訛惡餓兒爾餌貳發罰閥琺礬釩煩範販飯訪紡飛廢費紛墳奮憤糞豐楓鋒風瘋馮縫諷鳳膚輻撫輔賦複負訃婦縛該鈣蓋幹趕稈贛岡剛鋼綱崗臯鎬擱鴿閣鉻個給龔宮鞏貢鈎溝構購夠蠱顧剮關觀館慣貫廣規矽歸龜閨軌詭櫃貴劊輥滾鍋國過駭韓漢閡鶴賀橫轟鴻紅後壺護滬戶嘩華畫劃話懷壞歡環還緩換喚瘓煥渙黃謊揮輝毀賄穢會燴彙諱誨繪葷渾夥獲貨禍擊機積饑譏雞績緝極輯級擠幾薊劑濟計記際繼紀夾莢頰賈鉀價駕殲監堅箋間艱緘繭檢堿鹼揀撿簡儉減薦檻鑒踐賤見鍵艦劍餞漸濺澗漿蔣槳獎講醬膠澆驕嬌攪鉸矯僥腳餃繳絞轎較稭階節莖驚經頸靜鏡徑痙競淨糾廄舊駒舉據鋸懼劇鵑絹傑潔結誡屆緊錦僅謹進晉燼盡勁荊覺決訣絕鈞軍駿開凱顆殼課墾懇摳庫褲誇塊儈寬礦曠況虧巋窺饋潰擴闊蠟臘萊來賴藍欄攔籃闌蘭瀾讕攬覽懶纜爛濫撈勞澇樂鐳壘類淚籬離裏鯉禮麗厲勵礫曆瀝隸倆聯蓮連鐮憐漣簾斂臉鏈戀煉練糧涼兩輛諒療遼鐐獵臨鄰鱗凜賃齡鈴淩靈嶺領餾劉龍聾嚨籠壟攏隴樓婁摟簍蘆盧顱廬爐擄鹵虜魯賂祿錄陸驢呂鋁侶屢縷慮濾綠巒攣孿灤亂掄輪倫侖淪綸論蘿羅邏鑼籮騾駱絡媽瑪碼螞馬罵嗎買麥賣邁脈瞞饅蠻滿謾貓錨鉚貿麽黴沒鎂門悶們錳夢謎彌覓綿緬廟滅憫閩鳴銘謬謀畝鈉納難撓腦惱鬧餒膩攆撚釀鳥聶齧鑷鎳檸獰甯擰濘鈕紐膿濃農瘧諾歐鷗毆嘔漚盤龐國愛賠噴鵬騙飄頻貧蘋憑評潑頗撲鋪樸譜臍齊騎豈啓氣棄訖牽扡釺鉛遷簽謙錢鉗潛淺譴塹槍嗆牆薔強搶鍬橋喬僑翹竅竊欽親輕氫傾頃請慶瓊窮趨區軀驅齲顴權勸卻鵲讓饒擾繞熱韌認紉榮絨軟銳閏潤灑薩鰓賽傘喪騷掃澀殺紗篩曬閃陝贍繕傷賞燒紹賒攝懾設紳審嬸腎滲聲繩勝聖師獅濕詩屍時蝕實識駛勢釋飾視試壽獸樞輸書贖屬術樹豎數帥雙誰稅順說碩爍絲飼聳慫頌訟誦擻蘇訴肅雖綏歲孫損筍縮瑣鎖獺撻擡攤貪癱灘壇譚談歎湯燙濤縧騰謄銻題體屜條貼鐵廳聽烴銅統頭圖塗團頹蛻脫鴕馱駝橢窪襪彎灣頑萬網韋違圍爲濰維葦偉僞緯謂衛溫聞紋穩問甕撾蝸渦窩嗚鎢烏誣無蕪吳塢霧務誤錫犧襲習銑戲細蝦轄峽俠狹廈鍁鮮纖鹹賢銜閑顯險現獻縣餡羨憲線廂鑲鄉詳響項蕭銷曉嘯蠍協挾攜脅諧寫瀉謝鋅釁興洶鏽繡虛噓須許緒續軒懸選癬絢學勳詢尋馴訓訊遜壓鴉鴨啞亞訝閹煙鹽嚴顔閻豔厭硯彥諺驗鴦楊揚瘍陽癢養樣瑤搖堯遙窯謠藥爺頁業葉醫銥頤遺儀彜蟻藝億憶義詣議誼譯異繹蔭陰銀飲櫻嬰鷹應纓瑩螢營熒蠅穎喲擁傭癰踴詠湧優憂郵鈾猶遊誘輿魚漁娛與嶼語籲禦獄譽預馭鴛淵轅園員圓緣遠願約躍鑰嶽粵悅閱雲鄖勻隕運蘊醞暈韻雜災載攢暫贊贓髒鑿棗竈責擇則澤賊贈紮劄軋鍘閘詐齋債氈盞斬輾嶄棧戰綻張漲帳賬脹趙蟄轍鍺這貞針偵診鎮陣掙睜猙幀鄭證織職執紙摯擲幟質鍾終種腫衆謅軸皺晝驟豬諸誅燭矚囑貯鑄築駐專磚轉賺樁莊裝妝壯狀錐贅墜綴諄濁茲資漬蹤綜總縱鄒詛組鑽緻鐘麼為隻兇準啟闆裡靂餘鍊洩'; const from = mode === 's2t' ? zh_s : zh_t const to = mode === 's2t' ? zh_t : zh_s const convertTextNode = (node, from, to) => { if (node.nodeType === Node.TEXT_NODE) { node.textContent = node.textContent.replace(/[\u4e00-\u9fa5]/g, (match) => { return to[from.indexOf(match)] ?? match }); } else { node.childNodes.forEach(child => convertTextNode(child, from, to)); } }; doc.body.childNodes.forEach(node => { convertTextNode(node, from, to); }); } const bionicReadingHandler = (doc) => { return; }; const readingFeaturesDocHandler = (doc) => { if (readingRules.convertChineseMode !== 'none') { convertChineseHandler(readingRules.convertChineseMode, doc) } if (readingRules.bionicReadingMode) { bionicReadingHandler(doc) } } const footnoteDialog = document.getElementById('footnote-dialog') footnoteDialog.style.display = 'none' footnoteDialog.addEventListener('click', () => { // display none footnoteDialog.style.display = 'none' callFlutter("onFootnoteClose") }) const replaceFootnote = (view) => { clearSelection() footnoteDialog.querySelector('main').replaceChildren(view) view.addEventListener('load', (e) => { const { doc, index } = e.detail globalThis.footnoteSelection = () => handleSelection(view, doc, index) setSelectionHandler(view, doc, index) // convertChineseHandler(convertChineseMode, doc) readingFeaturesDocHandler(doc) setTimeout(() => { const dialog = document.getElementById('footnote-dialog') const content = document.querySelector("#footnote-dialog > main > foliate-view") .shadowRoot.querySelector("foliate-paginator") .shadowRoot.querySelector("#container > div > iframe") dialog.style.display = 'block' // dialog.style.width = 'auto' // dialog.style.height = 'auto' // const contentWidth = content.clientWidth // const contentHeight = content.clientHeight // const squareSize = contentWidth * contentHeight // dialog.style.height = 100 + 'px' // dialog.style.width = squareSize / 100 + 'px' // if (squareSize > window.innerWidth * 100 * 0.8) { // dialog.style.width = window.innerWidth * 0.8 + 'px' // dialog.style.height = squareSize / (window.innerWidth * 3.0) + 'px' // } //dialog.style.width = `${Math.min(Math.max(contentWidth, 200), window.innerWidth * 0.8)}px` //dialog.style.height = `${Math.min(Math.max(contentHeight, 100), 
window.innerHeight * 0.8)}px` }, 0) }) const { renderer } = view renderer.setAttribute('flow', 'scrolled') renderer.setAttribute('gap', '5%') renderer.setAttribute('top-margin', '0px') renderer.setAttribute('bottom-margin', '0px') const footNoteStyle = { fontSize: style.fontSize, fontName: style.fontName, fontPath: style.fontPath, letterSpacing: style.letterSpacing, spacing: style.spacing, textIndent: style.textIndent, fontColor: style.fontColor, backgroundColor: 'transparent', justify: true, hyphenate: true, } renderer.setStyles(getCSS(footNoteStyle)) // set background color of dialog // if #rrggbbaa, replace aa to ee footnoteDialog.style.backgroundColor = style.backgroundColor.slice(0, 7) + '33' } footnoteDialog.addEventListener('click', e => e.target === footnoteDialog ? footnoteDialog.close() : null) class Reader { annotations = new Map() annotationsByValue = new Map() #footnoteHandler = new FootnoteHandler() #doc #index #originalContent constructor() { this.#footnoteHandler.addEventListener('before-render', e => { const { view } = e.detail this.setView(view) replaceFootnote(view) }) this.#footnoteHandler.addEventListener('render', e => { const { view } = e.detail footnoteDialog.showModal() }) this.#originalContent = null } async open(file, cfi) { this.view = await getView(file, cfi) if (importing) return this.view.addEventListener('load', this.#onLoad.bind(this)) this.view.addEventListener('relocate', this.#onRelocate.bind(this)) this.view.addEventListener('click-view', this.#onClickView.bind(this)) setStyle() if (!cfi) this.view.renderer.next() this.setView(this.view) await this.view.init({ lastLocation: cfi }) } setView(view) { view.addEventListener('create-overlay', e => { const { index } = e.detail const list = this.annotations.get(index) if (list) for (const annotation of list) this.view.addAnnotation(annotation) }) view.addEventListener('draw-annotation', e => { const { draw, annotation } = e.detail const { color, type } = annotation if (type === 'highlight') draw(Overlayer.highlight, { color }) else if (type === 'underline') draw(Overlayer.underline, { color }) }) view.addEventListener('show-annotation', e => { const annotation = this.annotationsByValue.get(e.detail.value) const pos = getPosition(e.detail.range) onAnnotationClick({ annotation, pos }) }) view.addEventListener('external-link', e => { e.preventDefault() onExternalLink(e.detail) }) view.addEventListener('link', e => this.#footnoteHandler.handle(this.view.book, e)?.catch(err => { console.warn(err) this.view.goTo(e.detail.href) })) view.history.addEventListener('pushstate', e => { callFlutter('onPushState', { canGoBack: view.history.canGoBack, canGoForward: view.history.canGoForward }) }) view.addEventListener('click-image', async e => { console.log('click-image', e.detail.img.src) const blobUrl = e.detail.img.src const blob = await fetch(blobUrl).then(r => r.blob()) const base64 = await new Promise((resolve, reject) => { const reader = new FileReader() reader.onloadend = () => resolve(reader.result) reader.onerror = reject reader.readAsDataURL(blob) }) callFlutter('onImageClick', base64) }) } renderAnnotation() { const bookmarks = allAnnotations ?? 
[] for (const bookmark of bookmarks) { const { value, type, color, note } = bookmark const annotation = { id: bookmark.id, value, type, color, note } this.addAnnotation(annotation) } } showContextMenu() { return handleSelection(this.view, this.#doc, this.#index) } addAnnotation(annotation) { const { value } = annotation const spineCode = (value.split('/')[2].split('!')[0] - 2) / 2 const list = this.annotations.get(spineCode) if (list) list.push(annotation) else this.annotations.set(spineCode, [annotation]) this.annotationsByValue.set(value, annotation) this.view.addAnnotation(annotation) } removeAnnotation(cfi) { const annotation = this.annotationsByValue.get(cfi) const { value } = annotation const spineCode = (value.split('/')[2].split('!')[0] - 2) / 2 const list = this.annotations.get(spineCode) if (list) { const index = list.findIndex(a => a.id === annotation.id) if (index !== -1) list.splice(index, 1) } this.annotationsByValue.delete(value) this.view.addAnnotation(annotation, true) } #onLoad({ detail: { doc, index } }) { this.#doc = doc this.#index = index setSelectionHandler(this.view, doc, index) // if (!this.#originalContent) { // console.log('Saving original content', doc); // this.#originalContent = doc.cloneNode(true) // console.log('Original content saved', this.#originalContent); // } this.#saveOriginalContent() this.readingFeatures(readingRules) } #onRelocate({ detail }) { const { cfi, fraction, location, tocItem, pageItem, chapterLocation } = detail const loc = pageItem ? `Page ${pageItem.label}` : `Loc ${location.current}` onRelocated({ cfi, fraction, loc, tocItem, pageItem, location, chapterLocation }) } #onClickView({ detail: { x, y } }) { const coordinatesX = x / window.innerWidth const coordinatesY = y / window.innerHeight onClickView(coordinatesX, coordinatesY) } get index() { return this.#index } #saveOriginalContent = () => { // this.#originalContent = this.#doc.cloneNode(true) // save original content this.#originalContent = []; const walker = document.createTreeWalker( this.#doc.body, NodeFilter.SHOW_TEXT, null, false ); while (walker.nextNode()) { this.#originalContent.push(walker.currentNode.textContent); } } #restoreOriginalContent = () => { // this.#doc.body.innerHTML = this.#originalContent.body.innerHTML const walker = document.createTreeWalker( this.#doc.body, NodeFilter.SHOW_TEXT, null, false ); let node; let index = 0; while (node = walker.nextNode()) { node.textContent = this.#originalContent[index++]; } } readingFeatures = () => { this.#restoreOriginalContent() readingFeaturesDocHandler(this.#doc) } getChapterContent = () => { return this.#doc.body.textContent } getPreviousContent = (count = 2000) => { let currentContainer = this.view.lastLocation?.range?.endContainer?.parentElement; if (!currentContainer) return ''; let text = ''; while (text.length < count && currentContainer) { text = currentContainer.textContent + text; currentContainer = currentContainer.previousSibling; } return text; } getSelection = () => { const selection = this.#doc.getSelection(); const range = getSelectionRange(selection); return range; } } const open = async (file, cfi) => { const reader = new Reader() globalThis.reader = reader await reader.open(file, cfi) if (!importing) { callFlutter('onLoadEnd') onSetToc() callFlutter('renderAnnotations') } else { getMetadata() } } const callFlutter = (name, data) => { // console.log('callFlutter', name, data) window.flutter_inappwebview.callHandler(name, data) } const setStyle = () => { const turn = { scroll: false, animated: true } 
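// The switch below maps the `pageTurnStyle` value received from the host app onto the
// renderer's attributes: 'slide' -> paginated flow with animation, 'scroll' -> scrolled flow,
// 'noAnimation' -> paginated flow without animation. A minimal sketch of switching modes at
// runtime, using only the `changeStyle` entry point defined later in this file (illustrative,
// not the only supported call):
//   window.changeStyle({ pageTurnStyle: 'scroll' })  // merges into `style` and re-runs setStyle()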
switch (style.pageTurnStyle) { case 'slide': turn.scroll = false turn.animated = true break case 'scroll': turn.scroll = true turn.animated = true break case "noAnimation": turn.scroll = false turn.animated = false break } reader.view.renderer.setAttribute('flow', turn.scroll ? 'scrolled' : 'paginated') reader.view.renderer.setAttribute('top-margin', `${style.topMargin}px`) reader.view.renderer.setAttribute('bottom-margin', `${style.bottomMargin}px`) reader.view.renderer.setAttribute('gap', `${style.sideMargin}%`) reader.view.renderer.setAttribute('background-color', style.backgroundColor) reader.view.renderer.setAttribute('max-column-count', style.maxColumnCount) turn.animated ? reader.view.renderer.setAttribute('animated', 'true') : reader.view.renderer.removeAttribute('animated') const newStyle = { fontSize: style.fontSize, fontName: style.fontName, fontPath: style.fontPath, fontWeight: style.fontWeight, letterSpacing: style.letterSpacing, spacing: style.spacing, paragraphSpacing: style.paragraphSpacing, textIndent: style.textIndent, fontColor: style.fontColor, backgroundColor: style.backgroundColor, justify: style.justify, hyphenate: style.hyphenate } reader.view.renderer.setStyles?.(getCSS(newStyle)) } const onRelocated = (currentInfo) => { const chapterTitle = currentInfo.tocItem?.label const chapterHref = currentInfo.tocItem?.href const chapterTotalPages = currentInfo.chapterLocation.total const chapterCurrentPage = currentInfo.chapterLocation.current const bookTotalPages = currentInfo.location.total const bookCurrentPage = currentInfo.location.current const cfi = currentInfo.cfi const percentage = currentInfo.fraction callFlutter('onRelocated', { chapterTitle, chapterHref, chapterTotalPages, chapterCurrentPage, bookTotalPages, bookCurrentPage, cfi, percentage }) } const onAnnotationClick = (annotation) => callFlutter('onAnnotationClick', annotation) const onClickView = (x, y) => callFlutter('onClick', { x, y }) const onExternalLink = (link) => console.log(link) const onSetToc = () => callFlutter('onSetToc', reader.view.book.toc) const getMetadata = async () => { const cover = await reader.view.book.getCover() if (cover) { // cover is a blob, so we need to convert it to base64 const fileReader = new FileReader() fileReader.readAsDataURL(cover) fileReader.onloadend = () => { callFlutter('onMetadata', { ...reader.view.book.metadata, cover: fileReader.result }) } } else { callFlutter('onMetadata', { ...reader.view.book.metadata, cover: null }) } } window.changeStyle = (newStyle) => { style = { ...style, ...newStyle } console.log('changeStyle', JSON.stringify(style)) setStyle() } window.goToHref = href => reader.view.goTo(href) window.goToCfi = cfi => reader.view.goTo(cfi) window.goToPercent = percent => reader.view.goToFraction(percent) window.nextPage = () => reader.view.next() window.prevPage = () => reader.view.prev() window.setScroll = () => { style.scroll = true style.animated = true setStyle() } window.setPaginated = () => { style.scroll = false style.animated = true setStyle() } window.setNoAnimation = () => { style.scroll = false style.animated = false setStyle() } const onSelectionEnd = (selection) => { if (footnoteDialog.open || isPdf) { callFlutter('onSelectionEnd', { ...selection, footnote: true }) } else { callFlutter('onSelectionEnd', { ...selection, footnote: false }) } } window.showContextMenu = () => { if (footnoteDialog.open) { footnoteSelection() } else { reader.showContextMenu() } } window.getSelection = () => reader.getSelection() window.clearSelection = () => 
reader.view.deselect() window.addAnnotation = (annotation) => reader.addAnnotation(annotation) window.removeAnnotation = (cfi) => reader.removeAnnotation(cfi) window.prevSection = () => reader.view.renderer.prevSection() window.nextSection = () => reader.view.renderer.nextSection() window.initTts = () => reader.view.initTTS() window.ttsStop = () => reader.view.initTTS(true) window.ttsHere = () => { initTts() return reader.view.tts.from(reader.view.lastLocation.range) } window.ttsNextSection = async () => { await nextSection() initTts() return ttsNext() } window.ttsPrevSection = async (last) => { await prevSection() initTts() return last ? reader.view.tts.end() : ttsNext() } window.ttsNext = async () => { const result = reader.view.tts.next(true) if (result) return result return await ttsNextSection() } window.ttsPrev = () => { const result = reader.view.tts.prev(true) if (result) return result return ttsPrevSection(true) } window.ttsPrepare = () => reader.view.tts.prepare() window.clearSearch = () => reader.view.clearSearch() window.search = async (text, opts) => { opts == null && (opts = { 'scope': 'book', 'matchCase': false, 'matchDiacritics': false, 'matchWholeWords': false, }) const query = text.trim() if (!query) return const index = opts.scope === 'section' ? reader.index : null for await (const result of reader.view.search({ ...opts, query, index })) { if (result === 'done') { callFlutter('onSearch', { process: 1.0 }) } else if ('progress' in result) callFlutter('onSearch', { process: result.progress }) else { callFlutter('onSearch', result) } } } window.back = () => reader.view.history.back() window.forward = () => reader.view.history.forward() window.renderAnnotations = () => reader.renderAnnotation() window.theChapterContent = () => reader.getChapterContent() window.previousContent = (count = 2000) => reader.getPreviousContent(count) // window.convertChinese = (mode) => reader.convertChinese(mode) // window.bionicReading = (enable) => reader.bionicReading(enable) window.isFootNoteOpen = () => footnoteDialog.getAttribute('style').includes('display: block') window.closeFootNote = () => { // set zindex to 0 footnoteDialog.style.display = 'none' callFlutter("onFootnoteClose") } window.readingFeatures = (rules) => { readingRules = { ...readingRules, ...rules } reader.readingFeatures() } // get varible from url var urlParams = new URLSearchParams(window.location.search) var importing = JSON.parse(urlParams.get('importing')) var url = JSON.parse(urlParams.get('url')) var initialCfi = JSON.parse(urlParams.get('initialCfi')) var style = JSON.parse(urlParams.get('style')) var readingRules = JSON.parse(urlParams.get('readingRules')) fetch(url) .then(res => res.blob()) .then(blob => open(new File([blob], new URL(url, window.location.origin).pathname), initialCfi)) .catch(e => console.error(e)) ``` ## /assets/foliate-js/comic-book.js ```js path="/assets/foliate-js/comic-book.js" export const makeComicBook = ({ entries, loadBlob, getSize }, file) => { const cache = new Map() const urls = new Map() const load = async name => { if (cache.has(name)) return cache.get(name) const src = URL.createObjectURL(await loadBlob(name)) const page = URL.createObjectURL( new Blob([``], { type: 'text/html' })) urls.set(name, [src, page]) cache.set(name, page) return page } const unload = name => { urls.get(name)?.forEach?.(url => URL.revokeObjectURL(url)) urls.delete(name) cache.delete(name) } const exts = ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg', '.jxl', '.avif'] const files = entries 
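    // Page order: map entries to filenames, keep only supported image extensions,
    // then sort lexicographically (correct for the typical zero-padded page names
    // such as 001.jpg, 002.jpg, ...).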
.map(entry => entry.filename) .filter(name => exts.some(ext => name.endsWith(ext))) .sort() if (!files.length) throw new Error('No supported image files in archive') const book = {} book.getCover = () => loadBlob(files[0]) book.metadata = { title: file.name } book.sections = files.map(name => ({ id: name, load: () => load(name), unload: () => unload(name), size: getSize(name), })) book.toc = files.map(name => ({ label: name, href: name })) book.rendition = { layout: 'pre-paginated' } book.resolveHref = href => ({ index: book.sections.findIndex(s => s.id === href) }) book.splitTOCHref = href => [href, null] book.getTOCFragment = doc => doc.documentElement book.destroy = () => { for (const arr of urls.values()) for (const url of arr) URL.revokeObjectURL(url) } return book } ``` ## /assets/foliate-js/dict.js ```js path="/assets/foliate-js/dict.js" const decoder = new TextDecoder() const decode = decoder.decode.bind(decoder) const concatTypedArray = (a, b) => { const result = new a.constructor(a.length + b.length) result.set(a) result.set(b, a.length) return result } const strcmp = (a, b) => { a = a.toLowerCase(), b = b.toLowerCase() return a < b ? -1 : a > b ? 1 : 0 } class DictZip { #chlen #chunks #compressed inflate async load(file) { const header = new DataView(await file.slice(0, 12).arrayBuffer()) if (header.getUint8(0) !== 31 || header.getUint8(1) !== 139 || header.getUint8(2) !== 8) throw new Error('Not a DictZip file') const flg = header.getUint8(3) if (!flg & 0b100) throw new Error('Missing FEXTRA flag') const xlen = header.getUint16(10, true) const extra = new DataView(await file.slice(12, 12 + xlen).arrayBuffer()) if (extra.getUint8(0) !== 82 || extra.getUint8(1) !== 65) throw new Error('Subfield ID should be RA') if (extra.getUint16(4, true) !== 1) throw new Error('Unsupported version') this.#chlen = extra.getUint16(6, true) const chcnt = extra.getUint16(8, true) this.#chunks = [] for (let i = 0, chunkOffset = 0; i < chcnt; i++) { const chunkSize = extra.getUint16(10 + 2 * i, true) this.#chunks.push([chunkOffset, chunkSize]) chunkOffset = chunkOffset + chunkSize } // skip to compressed data let offset = 12 + xlen const max = Math.min(offset + 512, file.size) const strArr = new Uint8Array(await file.slice(0, max).arrayBuffer()) if (flg & 0b1000) { // fname const i = strArr.indexOf(0, offset) if (i < 0) throw new Error('Header too long') offset = i + 1 } if (flg & 0b10000) { // fcomment const i = strArr.indexOf(0, offset) if (i < 0) throw new Error('Header too long') offset = i + 1 } if (flg & 0b10) offset += 2 // fhcrc this.#compressed = file.slice(offset) } async read(offset, size) { const chunks = this.#chunks const startIndex = Math.trunc(offset / this.#chlen) const endIndex = Math.trunc((offset + size) / this.#chlen) const buf = await this.#compressed.slice(chunks[startIndex][0], chunks[endIndex][0] + chunks[endIndex][1]).arrayBuffer() let arr = new Uint8Array() for (let pos = 0, i = startIndex; i <= endIndex; i++) { const data = new Uint8Array(buf, pos, chunks[i][1]) arr = concatTypedArray(arr, await this.inflate(data)) pos += chunks[i][1] } const startOffset = offset - startIndex * this.#chlen return arr.subarray(startOffset, startOffset + size) } } class Index { strcmp = strcmp // binary search bisect(query, start = 0, end = this.words.length - 1) { if (end - start === 1) { if (!this.strcmp(query, this.getWord(start))) return start if (!this.strcmp(query, this.getWord(end))) return end return null } const mid = Math.floor(start + (end - start) / 2) const cmp = 
this.strcmp(query, this.getWord(mid)) if (cmp < 0) return this.bisect(query, start, mid) if (cmp > 0) return this.bisect(query, mid, end) return mid } // check for multiple definitions checkAdjacent(query, i) { if (i == null) return [] let j = i const equals = i => { const word = this.getWord(i) return word ? this.strcmp(query, word) === 0 : false } while (equals(j - 1)) j-- let k = i while (equals(k + 1)) k++ return j === k ? [i] : Array.from({ length: k + 1 - j }, (_, i) => j + i) } lookup(query) { return this.checkAdjacent(query, this.bisect(query)) } } const decodeBase64Number = str => { const { length } = str let n = 0 for (let i = 0; i < length; i++) { const c = str.charCodeAt(i) n += (c === 43 ? 62 // "+" : c === 47 ? 63 // "/" : c < 58 ? c + 4 // 0-9 -> 52-61 : c < 91 ? c - 65 // A-Z -> 0-25 : c - 71 // a-z -> 26-51 ) * 64 ** (length - 1 - i) } return n } class DictdIndex extends Index { getWord(i) { return this.words[i] } async load(file) { const words = [] const offsets = [] const sizes = [] for (const line of decode(await file.arrayBuffer()).split('\n')) { const a = line.split('\t') words.push(a[0]) offsets.push(decodeBase64Number(a[1])) sizes.push(decodeBase64Number(a[2])) } this.words = words this.offsets = offsets this.sizes = sizes } } export class DictdDict { #dict = new DictZip() #idx = new DictdIndex() loadDict(file, inflate) { this.#dict.inflate = inflate return this.#dict.load(file) } async #readWord(i) { const word = this.#idx.getWord(i) const offset = this.#idx.offsets[i] const size = this.#idx.sizes[i] return { word, data: ['m', this.#dict.read(offset, size)] } } #readWords(arr) { return Promise.all(arr.map(this.#readWord.bind(this))) } lookup(query) { return this.#readWords(this.#idx.lookup(query)) } } class StarDictIndex extends Index { isSyn #arr getWord(i) { const word = this.words[i] if (!word) return return decode(this.#arr.subarray(word[0], word[1])) } async load(file) { const { isSyn } = this const buf = await file.arrayBuffer() const arr = new Uint8Array(buf) this.#arr = arr const view = new DataView(buf) const words = [] const offsets = [] const sizes = [] for (let i = 0; i < arr.length;) { const newI = arr.subarray(0, i + 256).indexOf(0, i) if (newI < 0) throw new Error('Word too big') words.push([i, newI]) offsets.push(view.getUint32(newI + 1)) if (isSyn) i = newI + 5 else { sizes.push(view.getUint32(newI + 5)) i = newI + 9 } } this.words = words this.offsets = offsets this.sizes = sizes } } export class StarDict { #dict = new DictZip() #idx = new StarDictIndex() #syn = Object.assign(new StarDictIndex(), { isSyn: true }) async loadIfo(file) { const str = decode(await file.arrayBuffer()) this.ifo = Object.fromEntries(str.split('\n').map(line => { const sep = line.indexOf('=') if (sep < 0) return return [line.slice(0, sep), line.slice(sep + 1)] }).filter(x => x)) } loadDict(file, inflate) { this.#dict.inflate = inflate return this.#dict.load(file) } loadIdx(file) { return this.#idx.load(file) } loadSyn(file) { if (file) return this.#syn.load(file) } async #readWord(i) { const word = this.#idx.getWord(i) const offset = this.#idx.offsets[i] const size = this.#idx.sizes[i] const data = await this.#dict.read(offset, size) const seq = this.ifo.sametypesequence if (!seq) throw new Error('TODO') if (seq.length === 1) return { word, data: [[seq[0], data]] } throw new Error('TODO') } #readWords(arr) { return Promise.all(arr.map(this.#readWord.bind(this))) } lookup(query) { return this.#readWords(this.#idx.lookup(query)) } synonyms(query) { return 
this.#readWords(this.#syn.lookup(query).map(i => this.#syn.offsets[i])) } } ``` ## /assets/foliate-js/epub.js ```js path="/assets/foliate-js/epub.js" import * as CFI from './epubcfi.js' const NS = { CONTAINER: 'urn:oasis:names:tc:opendocument:xmlns:container', XHTML: 'http://www.w3.org/1999/xhtml', OPF: 'http://www.idpf.org/2007/opf', EPUB: 'http://www.idpf.org/2007/ops', DC: 'http://purl.org/dc/elements/1.1/', DCTERMS: 'http://purl.org/dc/terms/', ENC: 'http://www.w3.org/2001/04/xmlenc#', NCX: 'http://www.daisy.org/z3986/2005/ncx/', XLINK: 'http://www.w3.org/1999/xlink', SMIL: 'http://www.w3.org/ns/SMIL', } const MIME = { XML: 'application/xml', NCX: 'application/x-dtbncx+xml', XHTML: 'application/xhtml+xml', HTML: 'text/html', CSS: 'text/css', SVG: 'image/svg+xml', JS: /\/(x-)?(javascript|ecmascript)/, } // convert to camel case const camel = x => x.toLowerCase().replace(/[-:](.)/g, (_, g) => g.toUpperCase()) // strip and collapse ASCII whitespace // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace const normalizeWhitespace = str => str ? str .replace(/[\t\n\f\r ]+/g, ' ') .replace(/^[\t\n\f\r ]+/, '') .replace(/[\t\n\f\r ]+$/, '') : '' const filterAttribute = (attr, value, isList) => isList ? el => el.getAttribute(attr)?.split(/\s/)?.includes(value) : typeof value === 'function' ? el => value(el.getAttribute(attr)) : el => el.getAttribute(attr) === value const getAttributes = (...xs) => el => el ? Object.fromEntries(xs.map(x => [camel(x), el.getAttribute(x)])) : null const getElementText = el => normalizeWhitespace(el?.textContent) const childGetter = (doc, ns) => { // ignore the namespace if it doesn't appear in document at all const useNS = doc.lookupNamespaceURI(null) === ns || doc.lookupPrefix(ns) const f = useNS ? (el, name) => el => el.namespaceURI === ns && el.localName === name : (el, name) => el => el.localName === name return { $: (el, name) => [...el.children].find(f(el, name)), $$: (el, name) => [...el.children].filter(f(el, name)), $$$: useNS ? (el, name) => [...el.getElementsByTagNameNS(ns, name)] : (el, name) => [...el.getElementsByTagName(name)], } } const resolveURL = (url, relativeTo) => { try { if (relativeTo.includes(':')) return new URL(url, relativeTo) // the base needs to be a valid URL, so set a base URL and then remove it const root = 'https://invalid.invalid/' const obj = new URL(url, root + relativeTo) obj.search = '' return decodeURI(obj.href.replace(root, '')) } catch (e) { console.warn(e) return url } } const isExternal = uri => /^(?!blob)\w+:/i.test(uri) // like `path.relative()` in Node.js const pathRelative = (from, to) => { if (!from) return to const as = from.replace(/\/$/, '').split('/') const bs = to.replace(/\/$/, '').split('/') const i = (as.length > bs.length ? as : bs).findIndex((_, i) => as[i] !== bs[i]) return i < 0 ? 
'' : Array(as.length - i).fill('..').concat(bs.slice(i)).join('/') } const pathDirname = str => str.slice(0, str.lastIndexOf('/') + 1) // replace asynchronously and sequentially // same techinque as https://stackoverflow.com/a/48032528 const replaceSeries = async (str, regex, f) => { const matches = [] str.replace(regex, (...args) => (matches.push(args), null)) const results = [] for (const args of matches) results.push(await f(...args)) return str.replace(regex, () => results.shift()) } const regexEscape = str => str.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&') const LANGS = { attrs: ['dir', 'xml:lang'] } const ALTS = { name: 'alternate-script', many: true, ...LANGS, props: ['file-as'] } const CONTRIB = { many: true, ...LANGS, props: [{ name: 'role', many: true, attrs: ['scheme'] }, 'file-as', ALTS], setLegacyAttrs: (obj, el) => { if (!obj.role?.length) { const value = el.getAttributeNS(NS.OPF, 'role') if (value) obj.role = [{ value }] } obj.fileAs ??= el.getAttributeNS(NS.OPF, 'file-as') }, } const METADATA = [ { name: 'title', many: true, ...LANGS, props: ['title-type', 'display-seq', 'file-as', ALTS], }, { name: 'identifier', many: true, props: [{ name: 'identifier-type', attrs: ['scheme'] }], setLegacyAttrs: (obj, el) => { if (!obj.identifierType) { const value = el.getAttributeNS(NS.OPF, 'scheme') if (value) obj.identifierType = { value } } }, }, { name: 'language', many: true }, { name: 'creator', ...CONTRIB }, { name: 'contributor', ...CONTRIB }, { name: 'publisher', ...LANGS, props: ['file-as', ALTS] }, { name: 'description', ...LANGS, props: [ALTS] }, { name: 'rights', ...LANGS, props: [ALTS] }, { name: 'date' }, { name: 'dcterms:modified', type: 'meta' }, { name: 'subject', many: true, ...LANGS, props: ['term', 'authority', ALTS], setLegacyAttrs: (obj, el) => { obj.term ??= el.getAttributeNS(NS.OPF, 'term') obj.authority ??= el.getAttributeNS(NS.OPF, 'authority') }, }, { name: 'source', many: true }, { name: 'belongs-to-collection', type: 'meta', many: true, ...LANGS, props: [ 'collection-type', 'group-position', 'dcterms:identifier', 'file-as', ALTS, { name: 'belongs-to-collection', recursive: true }, ], }, ] const getMetadata = opf => { const { $, $$ } = childGetter(opf, NS.OPF) const $metadata = $(opf.documentElement, 'metadata') const els = Array.from($metadata.children) const getValue = (obj, el) => { if (!el) return null const { props = [], attrs = [] } = obj const value = getElementText(el) if (!props.length && !attrs.length) return value const id = el.getAttribute('id') const refines = id ? els.filter(filterAttribute('refines', '#' + id)) : [] const result = Object.fromEntries([['value', value]] .concat(props.map(prop => { const { many, recursive } = prop const name = typeof prop === 'string' ? prop : prop.name const filter = filterAttribute('property', name) const subobj = recursive ? obj : prop return [camel(name), many ? refines.filter(filter).map(el => getValue(subobj, el)) : getValue(subobj, refines.find(filter))] })) .concat(attrs.map(attr => [camel(attr), el.getAttribute(attr)]))) obj.setLegacyAttrs?.(result, el) return result } const arr = els.filter(filterAttribute('refines', null)) const metadata = Object.fromEntries(METADATA.map(obj => { const { type, name, many } = obj const filter = type === 'meta' ? el => el.namespaceURI === NS.OPF && el.getAttribute('property') === name : el => el.namespaceURI === NS.DC && el.localName === name return [camel(name), many ? 
arr.filter(filter).map(el => getValue(obj, el)) : getValue(obj, arr.find(filter))] })) const $$meta = $$($metadata, 'meta') const getMetasByPrefix = prefix => $$meta .filter(filterAttribute('property', x => x?.startsWith(prefix))) .map(el => [el.getAttribute('property').replace(prefix, ''), el]) const rendition = Object.fromEntries(getMetasByPrefix('rendition:') .map(([k, el]) => [k, getElementText(el)])) const media = { narrator: [], duration: {} } for (const [k, el] of getMetasByPrefix('media:')) { const v = getElementText(el) if (k === 'duration') media.duration[ el.getAttribute('refines')?.split('#')?.[1] ?? ''] = parseClock(v) else if (k === 'active-class') media.activeClass = v else if (k === 'narrator') media.narrator.push(v) else if (k === 'playback-active-class') media.playbackActiveClass = v } return { metadata, rendition, media } } const parseNav = (doc, resolve = f => f) => { const { $, $$, $$$ } = childGetter(doc, NS.XHTML) const resolveHref = href => href ? decodeURI(resolve(href)) : null const parseLI = getType => $li => { const $a = $($li, 'a') ?? $($li, 'span') const $ol = $($li, 'ol') const href = resolveHref($a?.getAttribute('href')) const label = getElementText($a) || $a?.getAttribute('title') // TODO: get and concat alt/title texts in content const result = { label, href, subitems: parseOL($ol) } if (getType) result.type = $a?.getAttributeNS(NS.EPUB, 'type')?.split(/\s/) return result } const parseOL = ($ol, getType) => $ol ? $$($ol, 'li').map(parseLI(getType)) : null const parseNav = ($nav, getType) => parseOL($($nav, 'ol'), getType) const $$nav = $$$(doc, 'nav') let toc = null, pageList = null, landmarks = null, others = [] for (const $nav of $$nav) { const type = $nav.getAttributeNS(NS.EPUB, 'type')?.split(/\s/) ?? [] if (type.includes('toc')) toc ??= parseNav($nav) else if (type.includes('page-list')) pageList ??= parseNav($nav) else if (type.includes('landmarks')) landmarks ??= parseNav($nav, true) else others.push({ label: getElementText($nav.firstElementChild), type, list: parseNav($nav), }) } return { toc, pageList, landmarks, others } } const parseNCX = (doc, resolve = f => f) => { const { $, $$ } = childGetter(doc, NS.NCX) const resolveHref = href => href ? decodeURI(resolve(href)) : null const parseItem = el => { const $label = $(el, 'navLabel') const $content = $(el, 'content') const label = getElementText($label) const href = resolveHref($content.getAttribute('src')) if (el.localName === 'navPoint') { const els = $$(el, 'navPoint') return { label, href, subitems: els.length ? els.map(parseItem) : null } } return { label, href } } const parseList = (el, itemName) => $$(el, itemName).map(parseItem) const getSingle = (container, itemName) => { const $container = $(doc.documentElement, container) return $container ? parseList($container, itemName) : null } return { toc: getSingle('navMap', 'navPoint'), pageList: getSingle('pageList', 'pageTarget'), others: $$(doc.documentElement, 'navList').map(el => ({ label: getElementText($(el, 'navLabel')), list: parseList(el, 'navTarget'), })), } } const parseClock = str => { if (!str) return const parts = str.split(':').map(x => parseFloat(x)) if (parts.length === 3) { const [h, m, s] = parts return h * 60 * 60 + m * 60 + s } if (parts.length === 2) { const [m, s] = parts return m * 60 + s } const [x, unit] = str.split(/(?=[^\d.])/) const n = parseFloat(x) const f = unit === 'h' ? 60 * 60 : unit === 'min' ? 60 : unit === 'ms' ? 
.001 : 1 return n * f } class MediaOverlay extends EventTarget { #entries #lastMediaOverlayItem #sectionIndex #audioIndex #itemIndex #audio #volume = 1 #rate = 1 constructor(book, loadXML) { super() this.book = book this.loadXML = loadXML } async #loadSMIL(item) { if (this.#lastMediaOverlayItem === item) return const doc = await this.loadXML(item.href) const resolve = href => href ? resolveURL(href, item.href) : null const { $, $$$ } = childGetter(doc, NS.SMIL) this.#audioIndex = -1 this.#itemIndex = -1 this.#entries = $$$(doc, 'par').reduce((arr, $par) => { const text = resolve($($par, 'text')?.getAttribute('src')) const $audio = $($par, 'audio') if (!text || !$audio) return arr const src = resolve($audio.getAttribute('src')) const begin = parseClock($audio.getAttribute('clipBegin')) const end = parseClock($audio.getAttribute('clipEnd')) const last = arr.at(-1) if (last?.src === src) last.items.push({ text, begin, end }) else arr.push({ src, items: [{ text, begin, end }] }) return arr }, []) this.#lastMediaOverlayItem = item } get #activeAudio() { return this.#entries[this.#audioIndex] } get #activeItem() { return this.#activeAudio?.items?.[this.#itemIndex] } #error(e) { console.error(e) this.dispatchEvent(new CustomEvent('error', { detail: e })) } #highlight() { this.dispatchEvent(new CustomEvent('highlight', { detail: this.#activeItem })) } #unhighlight() { this.dispatchEvent(new CustomEvent('unhighlight', { detail: this.#activeItem })) } async #play(audioIndex, itemIndex) { if (this.#audio) { this.#audio.pause() URL.revokeObjectURL(this.#audio.src) this.#audio = null } this.#audioIndex = audioIndex this.#itemIndex = itemIndex const src = this.#activeAudio?.src if (!src || !this.#activeItem) return this.start(this.#sectionIndex + 1) const url = URL.createObjectURL(await this.book.loadBlob(src)) const audio = new Audio(url) this.#audio = audio audio.addEventListener('timeupdate', () => { if (audio.paused) return const t = audio.currentTime const { items } = this.#activeAudio if (t > this.#activeItem?.end) { this.#unhighlight() if (this.#itemIndex === items.length - 1) { this.#play(this.#audioIndex + 1, 0).catch(e => this.#error(e)) return } } const oldIndex = this.#itemIndex while (items[this.#itemIndex + 1]?.begin <= t) this.#itemIndex++ if (this.#itemIndex !== oldIndex) this.#highlight() }) audio.addEventListener('error', () => this.#error(new Error(`Failed to load ${src}`))) audio.addEventListener('playing', () => this.#highlight()) audio.addEventListener('pause', () => this.#unhighlight()) audio.addEventListener('ended', () => { this.#unhighlight() URL.revokeObjectURL(url) this.#audio = null this.#play(audioIndex + 1, 0).catch(e => this.#error(e)) }) audio.addEventListener('canplaythrough', () => { audio.currentTime = this.#activeItem.begin ?? 
0 audio.volume = this.#volume audio.playbackRate = this.#rate audio.play().catch(e => this.#error(e)) }) } async start(sectionIndex, filter = () => true) { this.#audio?.pause() const section = this.book.sections[sectionIndex] const href = section?.id if (!href) return const { mediaOverlay } = section if (!mediaOverlay) return this.start(sectionIndex + 1) this.#sectionIndex = sectionIndex await this.#loadSMIL(mediaOverlay) for (let i = 0; i < this.#entries.length; i++) { const { items } = this.#entries[i] for (let j = 0; j < items.length; j++) { if (items[j].text.split('#')[0] === href && filter(items[j], j, items)) return this.#play(i, j).catch(e => this.#error(e)) } } } pause() { this.#audio?.pause() } resume() { this.#audio?.play().catch(e => this.#error(e)) } prev() { if (this.#itemIndex > 0) this.#play(this.#audioIndex, this.#itemIndex - 1) else if (this.#audioIndex > 0) this.#play(this.#audioIndex - 1, this.#entries[this.#audioIndex - 1].items.length - 1) else if (this.#sectionIndex > 0) this.start(this.#sectionIndex - 1, (_, i, items) => i === items.length - 1) } next() { this.#play(this.#audioIndex, this.#itemIndex + 1) } setVolume(volume) { this.#volume = volume if (this.#audio) this.#audio.volume = volume } setRate(rate) { this.#rate = rate if (this.#audio) this.#audio.playbackRate = rate } } const isUUID = /([0-9a-f]{8})-([0-9a-f]{4})-([0-9a-f]{4})-([0-9a-f]{4})-([0-9a-f]{12})/ const getUUID = opf => { for (const el of opf.getElementsByTagNameNS(NS.DC, 'identifier')) { const [id] = getElementText(el).split(':').slice(-1) if (isUUID.test(id)) return id } return '' } const getIdentifier = opf => getElementText( opf.getElementById(opf.documentElement.getAttribute('unique-identifier')) ?? opf.getElementsByTagNameNS(NS.DC, 'identifier')[0]) // https://www.w3.org/publishing/epub32/epub-ocf.html#sec-resource-obfuscation const deobfuscate = async (key, length, blob) => { const array = new Uint8Array(await blob.slice(0, length).arrayBuffer()) length = Math.min(length, array.length) for (var i = 0; i < length; i++) array[i] = array[i] ^ key[i % key.length] return new Blob([array, blob.slice(length)], { type: blob.type }) } const WebCryptoSHA1 = async str => { const data = new TextEncoder().encode(str) const buffer = await globalThis.crypto.subtle.digest('SHA-1', data) return new Uint8Array(buffer) } const deobfuscators = (sha1 = WebCryptoSHA1) => ({ 'http://www.idpf.org/2008/embedding': { key: opf => sha1(getIdentifier(opf) // eslint-disable-next-line no-control-regex .replaceAll(/[\u0020\u0009\u000d\u000a]/g, '')), decode: (key, blob) => deobfuscate(key, 1040, blob), }, 'http://ns.adobe.com/pdf/enc#RC': { key: opf => { const uuid = getUUID(opf).replaceAll('-', '') return Uint8Array.from({ length: 16 }, (_, i) => parseInt(uuid.slice(i * 2, i * 2 + 2), 16)) }, decode: (key, blob) => deobfuscate(key, 1024, blob), }, }) class Encryption { #uris = new Map() #decoders = new Map() #algorithms constructor(algorithms) { this.#algorithms = algorithms } async init(encryption, opf) { if (!encryption) return const data = Array.from( encryption.getElementsByTagNameNS(NS.ENC, 'EncryptedData'), el => ({ algorithm: el.getElementsByTagNameNS(NS.ENC, 'EncryptionMethod')[0] ?.getAttribute('Algorithm'), uri: el.getElementsByTagNameNS(NS.ENC, 'CipherReference')[0] ?.getAttribute('URI'), })) for (const { algorithm, uri } of data) { if (!this.#decoders.has(algorithm)) { const algo = this.#algorithms[algorithm] if (!algo) { console.warn('Unknown encryption algorithm') continue } const key = await algo.key(opf) 
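                // Cache one decoder closure per algorithm (the key was derived from the package
                // document above), then record which resource URI uses which algorithm so that
                // getDecoder() can return the matching transform, falling back to an identity
                // function for unencrypted resources.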
this.#decoders.set(algorithm, blob => algo.decode(key, blob)) } this.#uris.set(uri, algorithm) } } getDecoder(uri) { return this.#decoders.get(this.#uris.get(uri)) ?? (x => x) } } class Resources { constructor({ opf, resolveHref }) { this.opf = opf const { $, $$, $$$ } = childGetter(opf, NS.OPF) const $manifest = $(opf.documentElement, 'manifest') const $spine = $(opf.documentElement, 'spine') const $$itemref = $$($spine, 'itemref') this.manifest = $$($manifest, 'item') .map(getAttributes('href', 'id', 'media-type', 'properties', 'media-overlay')) .map(item => { item.href = resolveHref(item.href) item.properties = item.properties?.split(/\s/) return item }) this.spine = $$itemref .map(getAttributes('idref', 'id', 'linear', 'properties')) .map(item => (item.properties = item.properties?.split(/\s/), item)) this.pageProgressionDirection = $spine .getAttribute('page-progression-direction') this.navPath = this.getItemByProperty('nav')?.href this.ncxPath = (this.getItemByID($spine.getAttribute('toc')) ?? this.manifest.find(item => item.mediaType === MIME.NCX))?.href const $guide = $(opf.documentElement, 'guide') if ($guide) this.guide = $$($guide, 'reference') .map(getAttributes('type', 'title', 'href')) .map(({ type, title, href }) => ({ label: title, type: type.split(/\s/), href: resolveHref(href), })) this.cover = this.getItemByProperty('cover-image') // EPUB 2 compat ?? this.getItemByID($$$(opf, 'meta') .find(filterAttribute('name', 'cover')) ?.getAttribute('content')) ?? this.getItemByHref(this.guide ?.find(ref => ref.type.includes('cover'))?.href) this.cfis = CFI.fromElements($$itemref) } getItemByID(id) { return this.manifest.find(item => item.id === id) } getItemByHref(href) { return this.manifest.find(item => item.href === href) } getItemByProperty(prop) { return this.manifest.find(item => item.properties?.includes(prop)) } resolveCFI(cfi) { const parts = CFI.parse(cfi) const top = (parts.parent ?? 
parts).shift() let $itemref = CFI.toElement(this.opf, top) // make sure it's an idref; if not, try again without the ID assertion // mainly because Epub.js used to generate wrong ID assertions // https://github.com/futurepress/epub.js/issues/1236 if ($itemref && $itemref.nodeName !== 'idref') { // top.at(-1).id = null top[top.length - 1].id = null $itemref = CFI.toElement(this.opf, top) } const idref = $itemref?.getAttribute('idref') const index = this.spine.findIndex(item => item.idref === idref) const anchor = doc => CFI.toRange(doc, parts) return { index, anchor } } } class Loader { #cache = new Map() #children = new Map() #refCount = new Map() allowScript = false constructor({ loadText, loadBlob, resources }) { this.loadText = loadText this.loadBlob = loadBlob this.manifest = resources.manifest this.assets = resources.manifest // needed only when replacing in (X)HTML w/o parsing (see below) //.filter(({ mediaType }) => ![MIME.XHTML, MIME.HTML].includes(mediaType)) } createURL(href, data, type, parent) { if (!data) return '' const url = URL.createObjectURL(new Blob([data], { type })) this.#cache.set(href, url) this.#refCount.set(href, 1) if (parent) { const childList = this.#children.get(parent) if (childList) childList.push(href) else this.#children.set(parent, [href]) } return url } ref(href, parent) { const childList = this.#children.get(parent) if (!childList?.includes(href)) { this.#refCount.set(href, this.#refCount.get(href) + 1) //console.log(`referencing ${href}, now ${this.#refCount.get(href)}`) if (childList) childList.push(href) else this.#children.set(parent, [href]) } return this.#cache.get(href) } unref(href) { if (!this.#refCount.has(href)) return const count = this.#refCount.get(href) - 1 //console.log(`unreferencing ${href}, now ${count}`) if (count < 1) { //console.log(`unloading ${href}`) URL.revokeObjectURL(this.#cache.get(href)) this.#cache.delete(href) this.#refCount.delete(href) // unref children const childList = this.#children.get(href) if (childList) while (childList.length) this.unref(childList.pop()) this.#children.delete(href) } else this.#refCount.set(href, count) } // load manifest item, recursively loading all resources as needed async loadItem(item, parents = []) { if (!item) return null const { href, mediaType } = item const isScript = MIME.JS.test(item.mediaType) if (isScript && !this.allowScript) return null const parent = parents[parents.length - 1] if (this.#cache.has(href)) return this.ref(href, parent) const shouldReplace = (isScript || [MIME.XHTML, MIME.HTML, MIME.CSS, MIME.SVG].includes(mediaType)) // prevent circular references && parents.every(p => p !== href) if (shouldReplace) return this.loadReplaced(item, parents) return this.createURL(href, await this.loadBlob(href), mediaType, parent) } async loadHref(href, base, parents = []) { if (isExternal(href)) return href const path = resolveURL(href, base) const item = this.manifest.find(item => item.href === path) if (!item) return href return this.loadItem(item, parents.concat(base)) } async loadReplaced(item, parents = []) { const { href, mediaType } = item // const parent = parents.at(-1) const parent = parents[parents.length - 1] const str = await this.loadText(href) if (!str) return null // note that one can also just use `replaceString` for everything: // \`\`\` // const replaced = await this.replaceString(str, href, parents) // return this.createURL(href, replaced, mediaType, parent) // \`\`\` // which is basically what Epub.js does, which is simpler, but will // break things like iframes 
(because you don't want to replace links) // or text that just happen to be paths // parse and replace in HTML if ([MIME.XHTML, MIME.HTML, MIME.SVG].includes(mediaType)) { let doc = new DOMParser().parseFromString(str, mediaType) // change to HTML if it's not valid XHTML if (mediaType === MIME.XHTML && (doc.querySelector('parsererror') || !doc.documentElement?.namespaceURI)) { console.warn(doc.querySelector('parsererror')?.innerText ?? 'Invalid XHTML') item.mediaType = MIME.HTML doc = new DOMParser().parseFromString(str, item.mediaType) } // replace hrefs in XML processing instructions // this is mainly for SVGs that use xml-stylesheet if ([MIME.XHTML, MIME.SVG].includes(item.mediaType)) { let child = doc.firstChild while (child instanceof ProcessingInstruction) { if (child.data) { const replacedData = await replaceSeries(child.data, /(?:^|\s*)(href\s*=\s*['"])([^'"]*)(['"])/i, (_, p1, p2, p3) => this.loadHref(p2, href, parents) .then(p2 => `${p1}${p2}${p3}`)) child.replaceWith(doc.createProcessingInstruction( child.target, replacedData)) } child = child.nextSibling } } // replace hrefs (excluding anchors) // TODO: srcset? const replace = async (el, attr) => el.setAttribute(attr, await this.loadHref(el.getAttribute(attr), href, parents)) for (const el of doc.querySelectorAll('link[href]')) await replace(el, 'href') for (const el of doc.querySelectorAll('[src]')) await replace(el, 'src') for (const el of doc.querySelectorAll('[poster]')) await replace(el, 'poster') for (const el of doc.querySelectorAll('object[data]')) await replace(el, 'data') for (const el of doc.querySelectorAll('[*|href]:not([href])')) el.setAttributeNS(NS.XLINK, 'href', await this.loadHref( el.getAttributeNS(NS.XLINK, 'href'), href, parents)) // replace inline styles for (const el of doc.querySelectorAll('style')) if (el.textContent) el.textContent = await this.replaceCSS(el.textContent, href, parents) for (const el of doc.querySelectorAll('[style]')) el.setAttribute('style', await this.replaceCSS(el.getAttribute('style'), href, parents)) // TODO: replace inline scripts? probably not worth the trouble const result = new XMLSerializer().serializeToString(doc) return this.createURL(href, result, item.mediaType, parent) } const result = mediaType === MIME.CSS ? await this.replaceCSS(str, href, parents) : await this.replaceString(str, href, parents) return this.createURL(href, result, mediaType, parent) } async replaceCSS(str, href, parents = []) { const replacedUrls = await replaceSeries(str, /url\(\s*["']?([^'"\n]*?)\s*["']?\s*\)/gi, (_, url) => this.loadHref(url, href, parents) .then(url => `url("${url}")`)) // apart from `url()`, strings can be used for `@import` (but why?!) const replacedImports = await replaceSeries(replacedUrls, /@import\s*["']([^"'\n]*?)["']/gi, (_, url) => this.loadHref(url, href, parents) .then(url => `@import "${url}"`)) const w = window?.innerWidth ?? 800 const h = window?.innerHeight ?? 600 return replacedImports // unprefix as most of the props are (only) supported unprefixed .replace(/([{\s;])-epub-/gi, '$1') // replace vw and vh as they cause problems with layout .replace(/(\d*\.?\d+)vw/gi, (_, d) => parseFloat(d) * w / 100 + 'px') .replace(/(\d*\.?\d+)vh/gi, (_, d) => parseFloat(d) * h / 100 + 'px') // `page-break-*` unsupported in columns; replace with `column-break-*` .replace(/page-break-(after|before|inside)\s*:/gi, (_, x) => `-webkit-column-break-${x}:`) .replace(/break-(after|before|inside)\s*:\s*(avoid-)?page/gi, (_, x, y) => `break-${x}: ${y ?? 
''}column`) // If px is used as the unit of font-size, it should be converted to em and the // number should be divided by 16 .replace(/(\d*\.?\d+)px/gi, (_, d) => `${parseFloat(d) / 16}em`) } // find & replace all possible relative paths for all assets without parsing replaceString(str, href, parents = []) { const assetMap = new Map() const urls = this.assets.map(asset => { // do not replace references to the file itself if (asset.href === href) return // href was decoded and resolved when parsing the manifest const relative = pathRelative(pathDirname(href), asset.href) const relativeEnc = encodeURI(relative) const rootRelative = '/' + asset.href const rootRelativeEnc = encodeURI(rootRelative) const set = new Set([relative, relativeEnc, rootRelative, rootRelativeEnc]) for (const url of set) assetMap.set(url, asset) return Array.from(set) }).flat().filter(x => x) if (!urls.length) return str const regex = new RegExp(urls.map(regexEscape).join('|'), 'g') return replaceSeries(str, regex, async match => this.loadItem(assetMap.get(match.replace(/^\//, '')), parents.concat(href))) } unloadItem(item) { this.unref(item?.href) } destroy() { for (const url of this.#cache.values()) URL.revokeObjectURL(url) } } const getHTMLFragment = (doc, id) => doc.getElementById(id) ?? doc.querySelector(`[name="${CSS.escape(id)}"]`) const getPageSpread = properties => { for (const p of properties) { if (p === 'page-spread-left' || p === 'rendition:page-spread-left') return 'left' if (p === 'page-spread-right' || p === 'rendition:page-spread-right') return 'right' if (p === 'rendition:page-spread-center') return 'center' } } const getDisplayOptions = doc => { if (!doc) return null return { fixedLayout: getElementText(doc.querySelector('option[name="fixed-layout"]')), openToSpread: getElementText(doc.querySelector('option[name="open-to-spread"]')), } } export class EPUB { parser = new DOMParser() #loader #encryption constructor({ loadText, loadBlob, getSize, sha1 }) { this.loadText = loadText this.loadBlob = loadBlob this.getSize = getSize this.#encryption = new Encryption(deobfuscators(sha1)) } async #loadXML(uri) { const str = await this.loadText(uri) if (!str) return null const doc = this.parser.parseFromString(str, MIME.XML) if (doc.querySelector('parsererror')) throw new Error(`XML parsing error: ${uri} ${doc.querySelector('parsererror').innerText}`) return doc } async init() { const $container = await this.#loadXML('META-INF/container.xml') if (!$container) throw new Error('Failed to load container file') const opfs = Array.from( $container.getElementsByTagNameNS(NS.CONTAINER, 'rootfile'), getAttributes('full-path', 'media-type')) .filter(file => file.mediaType === 'application/oebps-package+xml') if (!opfs.length) throw new Error('No package document defined in container') const opfPath = opfs[0].fullPath const opf = await this.#loadXML(opfPath) if (!opf) throw new Error('Failed to load package document') const $encryption = await this.#loadXML('META-INF/encryption.xml') await this.#encryption.init($encryption, opf) this.resources = new Resources({ opf, resolveHref: url => resolveURL(url, opfPath), }) this.#loader = new Loader({ loadText: this.loadText, loadBlob: uri => Promise.resolve(this.loadBlob(uri)) .then(this.#encryption.getDecoder(uri)), resources: this.resources, }) this.sections = this.resources.spine.map((spineItem, index) => { const { idref, linear, properties = [] } = spineItem const item = this.resources.getItemByID(idref) if (!item) { console.warn(`Could not find item with ID "${idref}" in 
manifest`) return null } return { id: item.href, load: () => this.#loader.loadItem(item), unload: () => this.#loader.unloadItem(item), createDocument: () => this.loadDocument(item), size: this.getSize(item.href), cfi: this.resources.cfis[index], linear, pageSpread: getPageSpread(properties), resolveHref: href => resolveURL(href, item.href), mediaOverlay: item.mediaOverlay ? this.resources.getItemByID(item.mediaOverlay) : null, } }).filter(s => s) const { navPath, ncxPath } = this.resources if (navPath) try { const resolve = url => resolveURL(url, navPath) const nav = parseNav(await this.#loadXML(navPath), resolve) this.toc = nav.toc this.pageList = nav.pageList this.landmarks = nav.landmarks } catch (e) { console.warn(e) } if (!this.toc && ncxPath) try { const resolve = url => resolveURL(url, ncxPath) const ncx = parseNCX(await this.#loadXML(ncxPath), resolve) this.toc = ncx.toc this.pageList = ncx.pageList } catch (e) { console.warn(e) } this.landmarks ??= this.resources.guide const { metadata, rendition, media } = getMetadata(opf) this.rendition = rendition this.media = media this.dir = this.resources.pageProgressionDirection const displayOptions = getDisplayOptions( await this.#loadXML('META-INF/com.apple.ibooks.display-options.xml') ?? await this.#loadXML('META-INF/com.kobobooks.display-options.xml')) if (displayOptions) { if (displayOptions.fixedLayout === 'true') this.rendition.layout ??= 'pre-paginated' if (displayOptions.openToSpread === 'false') this.sections .find(section => section.linear !== 'no').pageSpread ??= this.dir === 'rtl' ? 'left' : 'right' } this.parsedMetadata = metadata // for debugging or advanced use cases const title = metadata?.title?.[0] this.metadata = { title: title?.value, subtitle: metadata?.title?.find(x => x.titleType === 'subtitle')?.value, sortAs: title?.fileAs, language: metadata?.language, identifier: getIdentifier(opf), description: metadata?.description?.value, publisher: metadata?.publisher?.value, published: metadata?.date, modified: metadata?.dctermsModified, subject: metadata?.subject ?.filter(({ value, term }) => value || term) ?.map(({ value, term, authority }) => ({ name: value, code: term, scheme: authority })), rights: metadata?.rights?.value, } const relators = { art: 'artist', aut: 'author', bkp: 'producer', clr: 'colorist', edt: 'editor', ill: 'illustrator', nrt: 'narrator', trl: 'translator', pbl: 'publisher', } const mapContributor = defaultKey => obj => { const keys = [...new Set(obj.role?.map(({ value, scheme }) => (!scheme || scheme === 'marc:relators' ? relators[value] : null) ?? defaultKey))] const value = { name: obj.value, sortAs: obj.fileAs } return [keys?.length ? keys : [defaultKey], value] } metadata?.creator?.map(mapContributor('author')) ?.concat(metadata?.contributor?.map?.(mapContributor('contributor'))) ?.forEach(([keys, value]) => keys.forEach(key => { if (this.metadata[key]) this.metadata[key].push(value) else this.metadata[key] = [value] })) return this } async loadDocument(item) { const str = await this.loadText(item.href) return this.parser.parseFromString(str, item.mediaType) } getMediaOverlay() { return new MediaOverlay(this, this.#loadXML.bind(this)) } resolveCFI(cfi) { return this.resources.resolveCFI(cfi) } resolveHref(href) { const [path, hash] = href.split('#') const item = this.resources.getItemByHref(decodeURI(path)) if (!item) return null const index = this.resources.spine.findIndex(({ idref }) => idref === item.id) const anchor = hash ? 
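/* with a fragment, the anchor resolves to the element with that ID (or name); without one, the section opens at its start */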
doc => getHTMLFragment(doc, hash) : () => 0 return { index, anchor } } splitTOCHref(href) { return href?.split('#') ?? [] } getTOCFragment(doc, id) { return doc.getElementById(id) ?? doc.querySelector(`[name="${CSS.escape(id)}"]`) } isExternal(uri) { return isExternal(uri) } async getCover() { const cover = this.resources?.cover return cover?.href ? new Blob([await this.loadBlob(cover.href)], { type: cover.mediaType }) : null } async getCalibreBookmarks() { const txt = await this.loadText('META-INF/calibre_bookmarks.txt') const magic = 'encoding=json+base64:' if (txt?.startsWith(magic)) { const json = atob(txt.slice(magic.length)) return JSON.parse(json) } } destroy() { this.#loader?.destroy() } } ``` ## /assets/foliate-js/epubcfi.js ```js path="/assets/foliate-js/epubcfi.js" const findIndices = (arr, f) => arr .map((x, i, a) => f(x, i, a) ? i : null).filter(x => x != null) const splitAt = (arr, is) => [-1, ...is, arr.length].reduce(({ xs, a }, b) => ({ xs: xs?.concat([arr.slice(a + 1, b)]) ?? [], a: b }), {}).xs const concatArrays = (a, b) => a.slice(0, -1).concat([a[a.length - 1].concat(b[0])]).concat(b.slice(1)) const isNumber = /\d/ export const isCFI = /^epubcfi\((.*)\)$/ const escapeCFI = str => str.replace(/[\^[\](),;=]/g, '^$&') const wrap = x => isCFI.test(x) ? x : `epubcfi(${x})` const unwrap = x => x.match(isCFI)?.[1] ?? x const lift = f => (...xs) => `epubcfi(${f(...xs.map(x => x.match(isCFI)?.[1] ?? x))})` export const joinIndir = lift((...xs) => xs.join('!')) const tokenizer = str => { const tokens = [] let state, escape, value = '' const push = x => (tokens.push(x), state = null, value = '') const cat = x => (value += x, escape = false) for (const char of Array.from(str.trim()).concat('')) { if (char === '^' && !escape) { escape = true continue } if (state === '!') push(['!']) else if (state === ',') push([',']) else if (state === '/' || state === ':') { if (isNumber.test(char)) { cat(char) continue } else push([state, parseInt(value)]) } else if (state === '~') { if (isNumber.test(char) || char === '.') { cat(char) continue } else push(['~', parseFloat(value)]) } else if (state === '@') { if (char === ':') { push(['@', parseFloat(value)]) state = '@' continue } if (isNumber.test(char) || char === '.') { cat(char) continue } else push(['@', parseFloat(value)]) } else if (state === '[') { if (char === ';' && !escape) { push(['[', value]) state = ';' } else if (char === ',' && !escape) { push(['[', value]) state = '[' } else if (char === ']' && !escape) push(['[', value]) else cat(char) continue } else if (state?.startsWith(';')) { if (char === '=' && !escape) { state = `;${value}` value = '' } else if (char === ';' && !escape) { push([state, value]) state = ';' } else if (char === ']' && !escape) push([state, value]) else cat(char) continue } if (char === '/' || char === ':' || char === '~' || char === '@' || char === '[' || char === '!' || char === ',') state = char } return tokens } const findTokens = (tokens, x) => findIndices(tokens, ([t]) => t === x) const parser = tokens => { const parts = [] let state for (const [type, val] of tokens) { if (type === '/') parts.push({ index: val }) else { const last = parts[parts.length - 1] if (type === ':') last.offset = val else if (type === '~') last.temporal = val else if (type === '@') last.spatial = (last.spatial ?? []).concat(val) else if (type === ';s') last.side = val else if (type === '[') { if (state === '/' && val) last.id = val else { last.text = (last.text ?? 
[]).concat(val) continue } } } state = type } return parts } // split at step indirections, then parse each part const parserIndir = tokens => splitAt(tokens, findTokens(tokens, '!')).map(parser) export const parse = cfi => { const tokens = tokenizer(unwrap(cfi)) const commas = findTokens(tokens, ',') if (!commas.length) return parserIndir(tokens) const [parent, start, end] = splitAt(tokens, commas).map(parserIndir) return { parent, start, end } } const partToString = ({ index, id, offset, temporal, spatial, text, side }) => { const param = side ? `;s=${side}` : '' return `/${index}` + (id ? `[${escapeCFI(id)}${param}]` : '') // "CFI expressions [..] SHOULD include an explicit character offset" + (offset != null && index % 2 ? `:${offset}` : '') + (temporal ? `~${temporal}` : '') + (spatial ? `@${spatial.join(':')}` : '') + (text || (!id && side) ? '[' + (text?.map(escapeCFI)?.join(',') ?? '') + param + ']' : '') } const toInnerString = parsed => parsed.parent ? [parsed.parent, parsed.start, parsed.end].map(toInnerString).join(',') : parsed.map(parts => parts.map(partToString).join('')).join('!') const toString = parsed => wrap(toInnerString(parsed)) export const collapse = (x, toEnd) => typeof x === 'string' ? toString(collapse(parse(x), toEnd)) : x.parent ? concatArrays(x.parent, x[toEnd ? 'end' : 'start']) : x // create range CFI from two CFIs const buildRange = (from, to) => { if (typeof from === 'string') from = parse(from) if (typeof to === 'string') to = parse(to) from = collapse(from) to = collapse(to, true) // ranges across multiple documents are not allowed; handle local paths only const localFrom = from[from.length - 1], localTo = to[to.length - 1] const localParent = [], localStart = [], localEnd = [] let pushToParent = true const len = Math.max(localFrom.length, localTo.length) for (let i = 0; i < len; i++) { const a = localFrom[i], b = localTo[i] pushToParent &&= a?.index === b?.index && !a?.offset && !b?.offset if (pushToParent) localParent.push(a) else { if (a) localStart.push(a) if (b) localEnd.push(b) } } // copy non-local paths from `from` const parent = from.slice(0, -1).concat([localParent]) return toString({ parent, start: [localStart], end: [localEnd] }) } export const compare = (a, b) => { if (typeof a === 'string') a = parse(a) if (typeof b === 'string') b = parse(b) if (a.start || b.start) return compare(collapse(a), collapse(b)) || compare(collapse(a, true), collapse(b, true)) for (let i = 0; i < Math.max(a.length, b.length); i++) { const p = a[i], q = b[i] const maxIndex = Math.max(p.length, q.length) - 1 for (let i = 0; i <= maxIndex; i++) { const x = p[i], y = q[i] if (!x) return -1 if (!y) return 1 if (x.index > y.index) return 1 if (x.index < y.index) return -1 if (i === maxIndex) { // TODO: compare temporal & spatial offsets if (x.offset > y.offset) return 1 if (x.offset < y.offset) return -1 } } } return 0 } const isTextNode = ({ nodeType }) => nodeType === 3 || nodeType === 4 const isElementNode = ({ nodeType }) => nodeType === 1 const getChildNodes = (node, filter) => { const nodes = Array.from(node.childNodes) // "content other than element and character data is ignored" .filter(node => isTextNode(node) || isElementNode(node)) return filter ? 
nodes.map(node => { const accept = filter(node) if (accept === NodeFilter.FILTER_REJECT) return null else if (accept === NodeFilter.FILTER_SKIP) return getChildNodes(node, filter) else return node }).flat().filter(x => x) : nodes } // child nodes are organized such that the result is always // [element, text, element, text, ..., element], // regardless of the actual structure in the document; // so multiple text nodes need to be combined, and nonexistent ones counted; // see "Step Reference to Child Element or Character Data (/)" in EPUB CFI spec const indexChildNodes = (node, filter) => { const nodes = getChildNodes(node, filter) .reduce((arr, node) => { let last = arr[arr.length - 1] if (!last) arr.push(node) // "there is one chunk between each pair of child elements" else if (isTextNode(node)) { if (Array.isArray(last)) last.push(node) else if (isTextNode(last)) arr[arr.length - 1] = [last, node] else arr.push(node) } else { if (isElementNode(last)) arr.push(null, node) else arr.push(node) } return arr }, []) // "the first chunk is located before the first child element" if (isElementNode(nodes[0])) nodes.unshift('first') // "the last chunk is located after the last child element" if (isElementNode(nodes[nodes.length - 1])) nodes.push('last') // "'virtual' elements" nodes.unshift('before') // "0 is a valid index" nodes.push('after') // "n+2 is a valid index" return nodes } const partsToNode = (node, parts, filter) => { const { id } = parts[parts.length - 1] if (id) { const el = node.ownerDocument.getElementById(id) if (el) return { node: el, offset: 0 } } for (const { index } of parts) { const newNode = node ? indexChildNodes(node, filter)[index] : null // handle non-existent nodes if (newNode === 'first') return { node: node.firstChild ?? node } if (newNode === 'last') return { node: node.lastChild ?? node } if (newNode === 'before') return { node, before: true } if (newNode === 'after') return { node, after: true } node = newNode } const { offset } = parts[parts.length - 1] if (!Array.isArray(node)) return { node, offset } // get underlying text node and offset from the chunk let sum = 0 for (const n of node) { const { length } = n.nodeValue if (sum + length >= offset) return { node: n, offset: offset - sum } sum += length } } const nodeToParts = (node, offset, filter) => { const { parentNode, id } = node const indexed = indexChildNodes(parentNode, filter) const index = indexed.findIndex(x => Array.isArray(x) ? x.some(x => x === node) : x === node) // adjust offset as if merging the text nodes in the chunk const chunk = indexed[index] if (Array.isArray(chunk)) { let sum = 0 for (const x of chunk) { if (x === node) { sum += offset break } else sum += x.nodeValue.length } offset = sum } const part = { id, index, offset } return (parentNode !== node.ownerDocument.documentElement ? 
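/* recurse up through the ancestors, one step per element, stopping at the document element */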
nodeToParts(parentNode, null, filter).concat(part) : [part]) // remove ignored nodes .filter(x => x.index !== -1) } export const fromRange = (range, filter) => { const { startContainer, startOffset, endContainer, endOffset } = range const start = nodeToParts(startContainer, startOffset, filter) if (range.collapsed) return toString([start]) const end = nodeToParts(endContainer, endOffset, filter) return buildRange([start], [end]) } export const toRange = (doc, parts, filter) => { const startParts = collapse(parts) const endParts = collapse(parts, true) const root = doc.documentElement const start = partsToNode(root, startParts[0], filter) const end = partsToNode(root, endParts[0], filter) const range = doc.createRange() if (start.before) range.setStartBefore(start.node) else if (start.after) range.setStartAfter(start.node) else range.setStart(start.node, start.offset) if (end.before) range.setEndBefore(end.node) else if (end.after) range.setEndAfter(end.node) else range.setEnd(end.node, end.offset) return range } // faster way of getting CFIs for sorted elements in a single parent export const fromElements = elements => { const results = [] const { parentNode } = elements[0] const parts = nodeToParts(parentNode) for (const [index, node] of indexChildNodes(parentNode).entries()) { const el = elements[results.length] if (node === el) results.push(toString([parts.concat({ id: el.id, index })])) } return results } export const toElement = (doc, parts) => partsToNode(doc.documentElement, collapse(parts)).node // turn indices into standard CFIs when you don't have an actual package document export const fake = { fromIndex: index => wrap(`/6/${(index + 1) * 2}`), toIndex: parts => parts?.at(-1).index / 2 - 1, } // get CFI from Calibre bookmarks // see https://github.com/johnfactotum/foliate/issues/849 export const fromCalibrePos = pos => { const [parts] = parse(pos) const item = parts.shift() parts.shift() return toString([[{ index: 6 }, item], parts]) } export const fromCalibreHighlight = ({ spine_index, start_cfi, end_cfi }) => { const pre = fake.fromIndex(spine_index) + '!' return buildRange(pre + start_cfi.slice(2), pre + end_cfi.slice(2)) } ``` ## /assets/foliate-js/eslint.config.js ```js path="/assets/foliate-js/eslint.config.js" import js from '@eslint/js' import globals from 'globals' export default [js.configs.recommended, { ignores: ['vendor'] }, { languageOptions: { globals: globals.browser, }, linterOptions: { reportUnusedDisableDirectives: true, }, rules: { semi: ['error', 'never'], indent: ['warn', 4, { flatTernaryExpressions: true, SwitchCase: 1 }], quotes: ['warn', 'single', { avoidEscape: true }], 'comma-dangle': ['warn', 'always-multiline'], 'no-trailing-spaces': 'warn', 'no-unused-vars': 'warn', 'no-console': ['warn', { allow: ['debug', 'warn', 'error', 'assert'] }], 'no-constant-condition': ['error', { checkLoops: false }], 'no-empty': ['error', { allowEmptyCatch: true }], }, }] ``` ## /assets/foliate-js/fb2.js ```js path="/assets/foliate-js/fb2.js" const normalizeWhitespace = str => str ? 
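/* collapse runs of XML whitespace (tab, newline, form feed, carriage return, space) to single spaces and trim both ends */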
str .replace(/[\t\n\f\r ]+/g, ' ') .replace(/^[\t\n\f\r ]+/, '') .replace(/[\t\n\f\r ]+$/, '') : '' const getElementText = el => normalizeWhitespace(el?.textContent) const NS = { XLINK: 'http://www.w3.org/1999/xlink', EPUB: 'http://www.idpf.org/2007/ops', } const MIME = { XML: 'application/xml', XHTML: 'application/xhtml+xml', } const STYLE = { 'strong': ['strong', 'self'], 'emphasis': ['em', 'self'], 'style': ['span', 'self'], 'a': 'anchor', 'strikethrough': ['s', 'self'], 'sub': ['sub', 'self'], 'sup': ['sup', 'self'], 'code': ['code', 'self'], 'image': 'image', } const TABLE = { 'tr': ['tr', ['align']], 'th': ['th', ['colspan', 'rowspan', 'align', 'valign']], 'td': ['td', ['colspan', 'rowspan', 'align', 'valign']], } const POEM = { 'epigraph': ['blockquote'], 'subtitle': ['h2', STYLE], 'text-author': ['p', STYLE], 'date': ['p', STYLE], 'stanza': 'stanza', } const SECTION = { 'title': ['header', { 'p': ['h1', STYLE], 'empty-line': ['br'], }], 'epigraph': ['blockquote', 'self'], 'image': 'image', 'annotation': ['aside'], 'section': ['section', 'self'], 'p': ['p', STYLE], 'poem': ['blockquote', POEM], 'subtitle': ['h2', STYLE], 'cite': ['blockquote', 'self'], 'empty-line': ['br'], 'table': ['table', TABLE], 'text-author': ['p', STYLE], } POEM['epigraph'].push(SECTION) const BODY = { 'image': 'image', 'title': ['section', { 'p': ['h1', STYLE], 'empty-line': ['br'], }], 'epigraph': ['section', SECTION], 'section': ['section', SECTION], } const getImageSrc = el => { const href = el.getAttributeNS(NS.XLINK, 'href') const [, id] = href.split('#') const bin = el.getRootNode().getElementById(id) return bin ? `data:${bin.getAttribute('content-type')};base64,${bin.textContent}` : href } class FB2Converter { constructor(fb2) { this.fb2 = fb2 this.doc = document.implementation.createDocument(NS.XHTML, 'html') } image(node) { const el = this.doc.createElement('img') el.alt = node.getAttribute('alt') el.title = node.getAttribute('title') el.setAttribute('src', getImageSrc(node)) return el } anchor(node) { const el = this.convert(node, { 'a': ['a', STYLE] }) el.setAttribute('href', node.getAttributeNS(NS.XLINK, 'href')) if (node.getAttribute('type') === 'note') el.setAttributeNS(NS.EPUB, 'epub:type', 'noteref') return el } stanza(node) { const el = this.convert(node, { 'stanza': ['p', { 'title': ['header', { 'p': ['strong', STYLE], 'empty-line': ['br'], }], 'subtitle': ['p', STYLE], }], }) for (const child of node.children) if (child.nodeName === 'v') { el.append(this.doc.createTextNode(child.textContent)) el.append(this.doc.createElement('br')) } return el } convert(node, def) { // not an element; return text content if (node.nodeType === 3) return this.doc.createTextNode(node.textContent) if (node.nodeType === 4) return this.doc.createCDATASection(node.textContent) if (node.nodeType === 8) return this.doc.createComment(node.textContent) const d = def?.[node.nodeName] if (!d) return null if (typeof d === 'string') return this[d](node) const [name, opts] = d const el = this.doc.createElement(name) // copy the ID, and set class name from original element name if (node.id) el.id = node.id el.classList.add(node.nodeName) // copy attributes if (Array.isArray(opts)) for (const attr of opts) el.setAttribute(attr, node.getAttribute(attr)) // process child elements recursively const childDef = opts === 'self' ? def : Array.isArray(opts) ? 
null : opts let child = node.firstChild while (child) { const childEl = this.convert(child, childDef) if (childEl) el.append(childEl) child = child.nextSibling } return el } } const parseXML = async blob => { const buffer = await blob.arrayBuffer() const str = new TextDecoder('utf-8').decode(buffer) const parser = new DOMParser() const doc = parser.parseFromString(str, MIME.XML) const encoding = doc.xmlEncoding // `Document.xmlEncoding` is deprecated, and already removed in Firefox // so parse the XML declaration manually || str.match(/^<\?xml\s+version\s*=\s*["']1.\d+"\s+encoding\s*=\s*["']([A-Za-z0-9._-]*)["']/)?.[1] if (encoding && encoding.toLowerCase() !== 'utf-8') { const str = new TextDecoder(encoding).decode(buffer) return parser.parseFromString(str, MIME.XML) } return doc } const style = URL.createObjectURL(new Blob([` @namespace epub "http://www.idpf.org/2007/ops"; body > img, section > img { display: block; margin: auto; } .title h1 { text-align: center; } body > section > .title, body.notesBodyType > .title { margin: 3em 0; } body.notesBodyType > section .title h1 { text-align: start; } body.notesBodyType > section .title { margin: 1em 0; } p { text-indent: 1em; margin: 0; } :not(p) + p, p:first-child { text-indent: 0; } .poem p { text-indent: 0; margin: 1em 0; } .text-author, .date { text-align: end; } .text-author:before { content: "—"; } table { border-collapse: collapse; } td, th { padding: .25em; } a[epub|type~="noteref"] { font-size: .75em; vertical-align: super; } body:not(.notesBodyType) > .title, body:not(.notesBodyType) > .epigraph { margin: 3em 0; } `], { type: 'text/css' })) const template = html => ` ${html} ` // name of custom ID attribute for TOC items const dataID = 'data-foliate-id' export const makeFB2 = async blob => { const book = {} const doc = await parseXML(blob) const converter = new FB2Converter(doc) const $ = x => doc.querySelector(x) const $$ = x => [...doc.querySelectorAll(x)] const getPerson = el => { const nick = getElementText(el.querySelector('nickname')) if (nick) return nick const first = getElementText(el.querySelector('first-name')) const middle = getElementText(el.querySelector('middle-name')) const last = getElementText(el.querySelector('last-name')) const name = [first, middle, last].filter(x => x).join(' ') const sortAs = last ? [last, [first, middle].filter(x => x).join(' ')].join(', ') : null return { name, sortAs } } const getDate = el => el?.getAttribute('value') ?? getElementText(el) const annotation = $('title-info annotation') book.metadata = { title: getElementText($('title-info book-title')), identifier: getElementText($('document-info id')), language: getElementText($('title-info lang')), author: $$('title-info author').map(getPerson), translator: $$('title-info translator').map(getPerson), producer: $$('document-info author').map(getPerson) .concat($$('document-info program-used').map(getElementText)), publisher: getElementText($('publish-info publisher')), published: getDate($('title-info date')), modified: getDate($('document-info date')), description: annotation ? 
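/* if the FB2 <annotation> element is present, convert it to XHTML and use its innerHTML as the description */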
converter.convert(annotation, { annotation: ['div', SECTION] }).innerHTML : null, subject: $$('title-info genre').map(getElementText), } if ($('coverpage image')) { const src = getImageSrc($('coverpage image')) book.getCover = () => fetch(src).then(res => res.blob()) } else book.getCover = () => null // get and convert each body const bodyData = Array.from(doc.querySelectorAll('body'), body => { const converted = converter.convert(body, { body: ['body', BODY] }) return [Array.from(converted.children, el => { // get list of IDs in the section const ids = [el, ...el.querySelectorAll('[id]')].map(el => el.id) return { el, ids } }), converted] }) const urls = [] const sectionData = bodyData[0][0] // make a separate section for each section in the first body .map(({ el, ids }) => { // set up titles for TOC const titles = Array.from( el.querySelectorAll(':scope > section > .title'), (el, index) => { el.setAttribute(dataID, index) return { title: getElementText(el), index } }) return { ids, titles, el } }) // for additional bodies, only make one section for each body .concat(bodyData.slice(1).map(([sections, body]) => { const ids = sections.map(s => s.ids).flat() body.classList.add('notesBodyType') return { ids, el: body, linear: 'no' } })) .map(({ ids, titles, el, linear }) => { const str = template(el.outerHTML) const blob = new Blob([str], { type: MIME.XHTML }) const url = URL.createObjectURL(blob) urls.push(url) const title = normalizeWhitespace( el.querySelector('.title, .subtitle, p')?.textContent ?? (el.classList.contains('title') ? el.textContent : '')) return { ids, title, titles, load: () => url, createDocument: () => new DOMParser().parseFromString(str, MIME.XHTML), // don't count image data as it'd skew the size too much size: blob.size - Array.from(el.querySelectorAll('[src]'), el => el.getAttribute('src')?.length ?? 0) .reduce((a, b) => a + b, 0), linear, } }) const idMap = new Map() book.sections = sectionData.map((section, index) => { const { ids, load, createDocument, size, linear } = section for (const id of ids) if (id) idMap.set(id, index) return { id: index, load, createDocument, size, linear } }) book.toc = sectionData.map(({ title, titles }, index) => { const id = index.toString() return { label: title, href: id, subitems: titles?.length ? titles.map(({ title, index }) => ({ label: title, href: `${id}#${index}`, })) : null, } }).filter(item => item) book.resolveHref = href => { const [a, b] = href.split('#') return a // the link is from the TOC ? { index: Number(a), anchor: doc => doc.querySelector(`[${dataID}="${b}"]`) } // link from within the page : { index: idMap.get(b), anchor: doc => doc.getElementById(b) } } book.splitTOCHref = href => href?.split('#')?.map(x => Number(x)) ?? [] book.getTOCFragment = (doc, id) => doc.querySelector(`[${dataID}="${id}"]`) book.destroy = () => { for (const url of urls) URL.revokeObjectURL(url) } return book } ``` ## /assets/foliate-js/fixed-layout.js ```js path="/assets/foliate-js/fixed-layout.js" const parseViewport = str => str ?.split(/[,;\s]/) // NOTE: technically, only the comma is valid ?.filter(x => x) ?.map(x => x.split('=').map(x => x.trim())) const getViewport = (doc, viewport) => { // use `viewBox` for SVG if (doc.documentElement.localName === 'svg') { const [, , width, height] = doc.documentElement .getAttribute('viewBox')?.split(/\s/) ??
[] return { width, height } } // get `viewport` `meta` element const meta = parseViewport(doc.querySelector('meta[name="viewport"]') ?.getAttribute('content')) if (meta) return Object.fromEntries(meta) // fallback to book's viewport if (typeof viewport === 'string') return parseViewport(viewport) if (viewport) return viewport // if no viewport (possibly with image directly in spine), get image size const img = doc.querySelector('img') if (img) return { width: img.naturalWidth, height: img.naturalHeight } // just show *something*, i guess... console.warn(new Error('Missing viewport properties')) return { width: 1000, height: 2000 } } export class FixedLayout extends HTMLElement { #root = this.attachShadow({ mode: 'closed' }) #observer = new ResizeObserver(() => this.#render()) #spreads #index = -1 defaultViewport spread #portrait = false #left #right #center #side constructor() { super() const sheet = new CSSStyleSheet() this.#root.adoptedStyleSheets = [sheet] sheet.replaceSync(`:host { width: 100%; height: 100%; display: flex; justify-content: center; align-items: center; }`) this.#observer.observe(this) } async #createFrame(position, { index, src }) { const element = document.createElement('div') const iframe = document.createElement('iframe') element.append(iframe) Object.assign(iframe.style, { border: '0', display: 'none', overflow: 'hidden', }) // `allow-scripts` is needed for events because of WebKit bug // https://bugs.webkit.org/show_bug.cgi?id=218086 iframe.setAttribute('sandbox', 'allow-same-origin allow-scripts') iframe.setAttribute('scrolling', 'no') iframe.setAttribute('part', 'filter') this.#root.append(element) if (!src) return { blank: true, element, iframe } return new Promise(resolve => { const onload = () => { iframe.removeEventListener('load', onload) const doc = iframe.contentDocument doc.position = position this.dispatchEvent(new CustomEvent('load', { detail: { doc, index } })) const { width, height } = getViewport(doc, this.defaultViewport) resolve({ element, iframe, width: parseFloat(width), height: parseFloat(height), }) } iframe.addEventListener('load', onload) iframe.src = src }) } #render(side = this.#side) { if (!side) return const left = this.#left ?? {} const right = this.#center ?? this.#right const target = side === 'left' ? left : right const { width, height } = this.getBoundingClientRect() const portrait = this.spread !== 'both' && this.spread !== 'portrait' && height > width this.#portrait = portrait const blankWidth = left.width ?? right.width const blankHeight = left.height ?? right.height const scale = portrait || this.#center ? Math.min( width / (target.width ?? blankWidth), height / (target.height ?? blankHeight)) : Math.min( width / ((left.width ?? blankWidth) + (right.width ?? blankWidth)), height / Math.max( left.height ?? blankHeight, right.height ?? blankHeight)) const transform = frame => { const { element, iframe, width, height, blank } = frame iframe.contentDocument.scale = scale Object.assign(iframe.style, { width: `${width}px`, height: `${height}px`, transform: `scale(${scale})`, transformOrigin: 'top left', display: blank ? 'none' : 'block', }) Object.assign(element.style, { width: `${(width ?? blankWidth) * scale}px`, height: `${(height ?? 
blankHeight) * scale}px`, overflow: 'hidden', display: 'block', }) if (portrait && frame !== target) { element.style.display = 'none' } } if (this.#center) { transform(this.#center) } else { transform(left) transform(right) } } async #showSpread({ left, right, center, side }) { this.#root.replaceChildren() this.#left = null this.#right = null this.#center = null if (center) { this.#center = await this.#createFrame('center', center) this.#side = 'center' this.#render() } else { this.#left = await this.#createFrame('left', left) this.#right = await this.#createFrame('right', right) this.#side = this.#left.blank ? 'right' : this.#right.blank ? 'left' : side this.#render() } } #goLeft() { if (this.#center || this.#left?.blank) return if (this.#portrait && this.#left?.element?.style?.display === 'none') { this.#right.element.style.display = 'none' this.#left.element.style.display = 'block' this.#side = 'left' return true } } #goRight() { if (this.#center || this.#right?.blank) return if (this.#portrait && this.#right?.element?.style?.display === 'none') { this.#left.element.style.display = 'none' this.#right.element.style.display = 'block' this.#side = 'right' return true } } open(book) { this.book = book const { rendition } = book this.spread = rendition?.spread this.defaultViewport = rendition?.viewport const rtl = book.dir === 'rtl' const ltr = !rtl this.rtl = rtl if (rendition?.spread === 'none') this.#spreads = book.sections.map(section => ({ center: section })) else this.#spreads = book.sections.reduce((arr, section) => { const last = arr[arr.length - 1] const { linear, pageSpread } = section if (linear === 'no') return arr const newSpread = () => { const spread = {} arr.push(spread) return spread } if (pageSpread === 'center') { const spread = last.left || last.right ? newSpread() : last spread.center = section } else if (pageSpread === 'left') { const spread = last.center || last.left || ltr ? newSpread() : last spread.left = section } else if (pageSpread === 'right') { const spread = last.center || last.right || rtl ? newSpread() : last spread.right = section } else if (ltr) { if (last.center || last.right) newSpread().left = section else if (last.left) last.right = section else last.left = section } else { if (last.center || last.left) newSpread().right = section else if (last.right) last.left = section else last .right = section } return arr }, [{}]) } get index() { const spread = this.#spreads[this.#index] const section = spread?.center ?? (this.side === 'left' ? spread.left ?? spread.right : spread.right ?? 
spread.left) return this.book.sections.indexOf(section) } #reportLocation(reason) { this.dispatchEvent(new CustomEvent('relocate', { detail: { reason, range: null, index: this.index, fraction: 0, size: 1 } })) } getSpreadOf(section) { const spreads = this.#spreads for (let index = 0; index < spreads.length; index++) { const { left, right, center } = spreads[index] if (left === section) return { index, side: 'left' } if (right === section) return { index, side: 'right' } if (center === section) return { index, side: 'center' } } } async goToSpread(index, side, reason) { if (index < 0 || index > this.#spreads.length - 1) return if (index === this.#index) { this.#render(side) return } this.#index = index const spread = this.#spreads[index] if (spread.center) { const index = this.book.sections.indexOf(spread.center) const src = await spread.center?.load?.() await this.#showSpread({ center: { index, src } }) } else { const indexL = this.book.sections.indexOf(spread.left) const indexR = this.book.sections.indexOf(spread.right) const srcL = await spread.left?.load?.() const srcR = await spread.right?.load?.() const left = { index: indexL, src: srcL } const right = { index: indexR, src: srcR } await this.#showSpread({ left, right, side }) } this.#reportLocation(reason) } async select(target) { await this.goTo(target) // TODO } async goTo(target) { const { book } = this const resolved = await target const section = book.sections[resolved.index] if (!section) return const { index, side } = this.getSpreadOf(section) await this.goToSpread(index, side) } async next() { const s = this.rtl ? this.#goLeft() : this.#goRight() if (s) this.#reportLocation('page') else return this.goToSpread(this.#index + 1, this.rtl ? 'right' : 'left', 'page') } async prev() { const s = this.rtl ? this.#goRight() : this.#goLeft() if (s) this.#reportLocation('page') else return this.goToSpread(this.#index - 1, this.rtl ? 'left' : 'right', 'page') } getContents() { return Array.from(this.#root.querySelectorAll('iframe'), frame => ({ doc: frame.contentDocument, // TODO: index, overlayer })) } destroy() { this.#observer.unobserve(this) } } customElements.define('foliate-fxl', FixedLayout) ``` ## /assets/foliate-js/footnotes.js ```js path="/assets/foliate-js/footnotes.js" const getTypes = el => new Set(el?.getAttributeNS?.('http://www.idpf.org/2007/ops', 'type')?.split(' ')) const getRoles = el => new Set(el?.getAttribute?.('role')?.split(' ')) const isSuper = el => { const { verticalAlign } = getComputedStyle(el) return verticalAlign === 'super' || /^\d/.test(verticalAlign) } const refTypes = ['biblioref', 'glossref', 'noteref'] const refRoles = ['doc-biblioref', 'doc-glossref', 'doc-noteref'] const isFootnoteReference = a => { const types = getTypes(a) const roles = getRoles(a) return { yes: refRoles.some(r => roles.has(r)) || refTypes.some(t => types.has(t)), maybe: () => !types.has('backlink') && !roles.has('doc-backlink') && (isSuper(a) || a.children.length === 1 && isSuper(a.children[0]) || isSuper(a.parentElement)), } } const getReferencedType = el => { const types = getTypes(el) const roles = getRoles(el) return roles.has('doc-biblioentry') || types.has('biblioentry') ? 'biblioentry' : roles.has('definition') || types.has('glossdef') ? 'definition' : roles.has('doc-endnote') || types.has('endnote') || types.has('rearnote') ? 'endnote' : roles.has('doc-footnote') || types.has('footnote') ? 'footnote' : roles.has('note') || types.has('note') ? 
'note' : null } const isInline = 'a, span, sup, sub, em, strong, i, b, small, big' const extractFootnote = (doc, anchor) => { let el = anchor(doc) const target = el while (el.matches(isInline)) { const parent = el.parentElement if (!parent) break el = parent } if (el === doc.body) { const sibling = target.nextElementSibling if (sibling && !sibling.matches(isInline)) return sibling throw new Error('Failed to extract footnote') } return el } export class FootnoteHandler extends EventTarget { detectFootnotes = true #showFragment(book, { index, anchor }, href) { const view = document.createElement('foliate-view') return new Promise((resolve, reject) => { view.addEventListener('load', e => { try { const { doc } = e.detail const el = anchor(doc) const type = getReferencedType(el) const hidden = el?.matches?.('aside') && type === 'footnote' if (el) { const range = el.startContainer ? el : doc.createRange() if (!el.startContainer) { if (el.matches('li, aside')) range.selectNodeContents(el) else range.selectNode(el) } const frag = range.extractContents() doc.body.replaceChildren() doc.body.appendChild(frag) } const detail = { view, href, type, hidden, target: el } this.dispatchEvent(new CustomEvent('render', { detail })) resolve() } catch (e) { reject(e) } }) view.open(book) .then(() => this.dispatchEvent(new CustomEvent('before-render', { detail: { view } }))) .then(() => view.goTo(index)) .catch(reject) }) } handle(book, e) { const { a, href } = e.detail const { yes, maybe } = isFootnoteReference(a) if (yes) { e.preventDefault() return Promise.resolve(book.resolveHref(href)).then(target => this.#showFragment(book, target, href)) } else if (this.detectFootnotes && maybe()) { e.preventDefault() return Promise.resolve(book.resolveHref(href)).then(({ index, anchor }) => { const target = { index, anchor: doc => extractFootnote(doc, anchor) } return this.#showFragment(book, target, href) }) } } } ``` ## /assets/foliate-js/index.html ```html path="/assets/foliate-js/index.html" Anx Reader
``` ## /assets/foliate-js/mobi.js ```js path="/assets/foliate-js/mobi.js" const unescapeHTML = str => { if (!str) return '' const textarea = document.createElement('textarea') textarea.innerHTML = str return textarea.value } const MIME = { XML: 'application/xml', XHTML: 'application/xhtml+xml', HTML: 'text/html', CSS: 'text/css', SVG: 'image/svg+xml', } const PDB_HEADER = { name: [0, 32, 'string'], type: [60, 4, 'string'], creator: [64, 4, 'string'], numRecords: [76, 2, 'uint'], } const PALMDOC_HEADER = { compression: [0, 2, 'uint'], numTextRecords: [8, 2, 'uint'], recordSize: [10, 2, 'uint'], encryption: [12, 2, 'uint'], } const MOBI_HEADER = { magic: [16, 4, 'string'], length: [20, 4, 'uint'], type: [24, 4, 'uint'], encoding: [28, 4, 'uint'], uid: [32, 4, 'uint'], version: [36, 4, 'uint'], titleOffset: [84, 4, 'uint'], titleLength: [88, 4, 'uint'], localeRegion: [94, 1, 'uint'], localeLanguage: [95, 1, 'uint'], resourceStart: [108, 4, 'uint'], huffcdic: [112, 4, 'uint'], numHuffcdic: [116, 4, 'uint'], exthFlag: [128, 4, 'uint'], trailingFlags: [240, 4, 'uint'], indx: [244, 4, 'uint'], } const KF8_HEADER = { resourceStart: [108, 4, 'uint'], fdst: [192, 4, 'uint'], numFdst: [196, 4, 'uint'], frag: [248, 4, 'uint'], skel: [252, 4, 'uint'], guide: [260, 4, 'uint'], } const EXTH_HEADER = { magic: [0, 4, 'string'], length: [4, 4, 'uint'], count: [8, 4, 'uint'], } const INDX_HEADER = { magic: [0, 4, 'string'], length: [4, 4, 'uint'], type: [8, 4, 'uint'], idxt: [20, 4, 'uint'], numRecords: [24, 4, 'uint'], encoding: [28, 4, 'uint'], language: [32, 4, 'uint'], total: [36, 4, 'uint'], ordt: [40, 4, 'uint'], ligt: [44, 4, 'uint'], numLigt: [48, 4, 'uint'], numCncx: [52, 4, 'uint'], } const TAGX_HEADER = { magic: [0, 4, 'string'], length: [4, 4, 'uint'], numControlBytes: [8, 4, 'uint'], } const HUFF_HEADER = { magic: [0, 4, 'string'], offset1: [8, 4, 'uint'], offset2: [12, 4, 'uint'], } const CDIC_HEADER = { magic: [0, 4, 'string'], length: [4, 4, 'uint'], numEntries: [8, 4, 'uint'], codeLength: [12, 4, 'uint'], } const FDST_HEADER = { magic: [0, 4, 'string'], numEntries: [8, 4, 'uint'], } const FONT_HEADER = { flags: [8, 4, 'uint'], dataStart: [12, 4, 'uint'], keyLength: [16, 4, 'uint'], keyStart: [20, 4, 'uint'], } const MOBI_ENCODING = { 1252: 'windows-1252', 65001: 'utf-8', } const EXTH_RECORD_TYPE = { 100: ['creator', 'string', true], 101: ['publisher'], 103: ['description'], 104: ['isbn'], 105: ['subject', 'string', true], 106: ['date'], 108: ['contributor', 'string', true], 109: ['rights'], 110: ['subjectCode', 'string', true], 112: ['source', 'string', true], 113: ['asin'], 121: ['boundary', 'uint'], 122: ['fixedLayout'], 125: ['numResources', 'uint'], 126: ['originalResolution'], 127: ['zeroGutter'], 128: ['zeroMargin'], 129: ['coverURI'], 132: ['regionMagnification'], 201: ['coverOffset', 'uint'], 202: ['thumbnailOffset', 'uint'], 503: ['title'], 524: ['language', 'string', true], 527: ['pageProgressionDirection'], } const MOBI_LANG = { 1: ['ar', 'ar-SA', 'ar-IQ', 'ar-EG', 'ar-LY', 'ar-DZ', 'ar-MA', 'ar-TN', 'ar-OM', 'ar-YE', 'ar-SY', 'ar-JO', 'ar-LB', 'ar-KW', 'ar-AE', 'ar-BH', 'ar-QA'], 2: ['bg'], 3: ['ca'], 4: ['zh', 'zh-TW', 'zh-CN', 'zh-HK', 'zh-SG'], 5: ['cs'], 6: ['da'], 7: ['de', 'de-DE', 'de-CH', 'de-AT', 'de-LU', 'de-LI'], 8: ['el'], 9: ['en', 'en-US', 'en-GB', 'en-AU', 'en-CA', 'en-NZ', 'en-IE', 'en-ZA', 'en-JM', null, 'en-BZ', 'en-TT', 'en-ZW', 'en-PH'], 10: ['es', 'es-ES', 'es-MX', null, 'es-GT', 'es-CR', 'es-PA', 'es-DO', 'es-VE', 'es-CO', 'es-PE', 'es-AR', 'es-EC', 'es-CL', 
'es-UY', 'es-PY', 'es-BO', 'es-SV', 'es-HN', 'es-NI', 'es-PR'], 11: ['fi'], 12: ['fr', 'fr-FR', 'fr-BE', 'fr-CA', 'fr-CH', 'fr-LU', 'fr-MC'], 13: ['he'], 14: ['hu'], 15: ['is'], 16: ['it', 'it-IT', 'it-CH'], 17: ['ja'], 18: ['ko'], 19: ['nl', 'nl-NL', 'nl-BE'], 20: ['no', 'nb', 'nn'], 21: ['pl'], 22: ['pt', 'pt-BR', 'pt-PT'], 23: ['rm'], 24: ['ro'], 25: ['ru'], 26: ['hr', null, 'sr'], 27: ['sk'], 28: ['sq'], 29: ['sv', 'sv-SE', 'sv-FI'], 30: ['th'], 31: ['tr'], 32: ['ur'], 33: ['id'], 34: ['uk'], 35: ['be'], 36: ['sl'], 37: ['et'], 38: ['lv'], 39: ['lt'], 41: ['fa'], 42: ['vi'], 43: ['hy'], 44: ['az'], 45: ['eu'], 46: ['hsb'], 47: ['mk'], 48: ['st'], 49: ['ts'], 50: ['tn'], 52: ['xh'], 53: ['zu'], 54: ['af'], 55: ['ka'], 56: ['fo'], 57: ['hi'], 58: ['mt'], 59: ['se'], 62: ['ms'], 63: ['kk'], 65: ['sw'], 67: ['uz', null, 'uz-UZ'], 68: ['tt'], 69: ['bn'], 70: ['pa'], 71: ['gu'], 72: ['or'], 73: ['ta'], 74: ['te'], 75: ['kn'], 76: ['ml'], 77: ['as'], 78: ['mr'], 79: ['sa'], 82: ['cy', 'cy-GB'], 83: ['gl', 'gl-ES'], 87: ['kok'], 97: ['ne'], 98: ['fy'], } const concatTypedArray = (a, b) => { const result = new a.constructor(a.length + b.length) result.set(a) result.set(b, a.length) return result } const concatTypedArray3 = (a, b, c) => { const result = new a.constructor(a.length + b.length + c.length) result.set(a) result.set(b, a.length) result.set(c, a.length + b.length) return result } const decoder = new TextDecoder() const getString = buffer => decoder.decode(buffer) const getUint = buffer => { if (!buffer) return const l = buffer.byteLength const func = l === 4 ? 'getUint32' : l === 2 ? 'getUint16' : 'getUint8' return new DataView(buffer)[func](0) } const getStruct = (def, buffer) => Object.fromEntries(Array.from(Object.entries(def)) .map(([key, [start, len, type]]) => [key, (type === 'string' ? 
getString : getUint)(buffer.slice(start, start + len))])) const getDecoder = x => new TextDecoder(MOBI_ENCODING[x]) const getVarLen = (byteArray, i = 0) => { let value = 0, length = 0 for (const byte of byteArray.subarray(i, i + 4)) { value = (value << 7) | (byte & 0b111_1111) >>> 0 length++ if (byte & 0b1000_0000) break } return { value, length } } // variable-length quantity, but read from the end of data const getVarLenFromEnd = byteArray => { let value = 0 for (const byte of byteArray.subarray(-4)) { // `byte & 0b1000_0000` indicates the start of value if (byte & 0b1000_0000) value = 0 value = (value << 7) | (byte & 0b111_1111) } return value } const countBitsSet = x => { let count = 0 for (; x > 0; x = x >> 1) if ((x & 1) === 1) count++ return count } const countUnsetEnd = x => { let count = 0 while ((x & 1) === 0) x = x >> 1, count++ return count } const decompressPalmDOC = array => { let output = [] for (let i = 0; i < array.length; i++) { const byte = array[i] if (byte === 0) output.push(0) // uncompressed literal, just copy it else if (byte <= 8) // copy next 1-8 bytes for (const x of array.subarray(i + 1, (i += byte) + 1)) output.push(x) else if (byte <= 0b0111_1111) output.push(byte) // uncompressed literal else if (byte <= 0b1011_1111) { // 1st and 2nd bits are 10, meaning this is a length-distance pair // read next byte and combine it with current byte const bytes = (byte << 8) | array[i++ + 1] // the 3rd to 13th bits encode distance const distance = (bytes & 0b0011_1111_1111_1111) >>> 3 // the last 3 bits, plus 3, is the length to copy const length = (bytes & 0b111) + 3 for (let j = 0; j < length; j++) output.push(output[output.length - distance]) } // compressed from space plus char else output.push(32, byte ^ 0b1000_0000) } return Uint8Array.from(output) } const read32Bits = (byteArray, from) => { const startByte = from >> 3 const end = from + 32 const endByte = end >> 3 let bits = 0n for (let i = startByte; i <= endByte; i++) bits = bits << 8n | BigInt(byteArray[i] ?? 
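/* bytes past the end of the array are read as zero */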
0) return (bits >> (8n - BigInt(end & 7))) & 0xffffffffn } const huffcdic = async (mobi, loadRecord) => { const huffRecord = await loadRecord(mobi.huffcdic) const { magic, offset1, offset2 } = getStruct(HUFF_HEADER, huffRecord) if (magic !== 'HUFF') throw new Error('Invalid HUFF record') // table1 is indexed by byte value const table1 = Array.from({ length: 256 }, (_, i) => offset1 + i * 4) .map(offset => getUint(huffRecord.slice(offset, offset + 4))) .map(x => [x & 0b1000_0000, x & 0b1_1111, x >>> 8]) // table2 is indexed by code length const table2 = [null].concat(Array.from({ length: 32 }, (_, i) => offset2 + i * 8) .map(offset => [ getUint(huffRecord.slice(offset, offset + 4)), getUint(huffRecord.slice(offset + 4, offset + 8))])) const dictionary = [] for (let i = 1; i < mobi.numHuffcdic; i++) { const record = await loadRecord(mobi.huffcdic + i) const cdic = getStruct(CDIC_HEADER, record) if (cdic.magic !== 'CDIC') throw new Error('Invalid CDIC record') // `numEntries` is the total number of dictionary data across CDIC records // so `n` here is the number of entries in *this* record const n = Math.min(1 << cdic.codeLength, cdic.numEntries - dictionary.length) const buffer = record.slice(cdic.length) for (let i = 0; i < n; i++) { const offset = getUint(buffer.slice(i * 2, i * 2 + 2)) const x = getUint(buffer.slice(offset, offset + 2)) const length = x & 0x7fff const decompressed = x & 0x8000 const value = new Uint8Array( buffer.slice(offset + 2, offset + 2 + length)) dictionary.push([value, decompressed]) } } const decompress = byteArray => { let output = new Uint8Array() const bitLength = byteArray.byteLength * 8 for (let i = 0; i < bitLength;) { const bits = Number(read32Bits(byteArray, i)) let [found, codeLength, value] = table1[bits >>> 24] if (!found) { while (bits >>> (32 - codeLength) < table2[codeLength][0]) codeLength += 1 value = table2[codeLength][1] } if ((i += codeLength) > bitLength) break const code = value - (bits >>> (32 - codeLength)) let [result, decompressed] = dictionary[code] if (!decompressed) { // the result is itself compressed result = decompress(result) // cache the result for next time dictionary[code] = [result, true] } output = concatTypedArray(output, result) } return output } return decompress } const getIndexData = async (indxIndex, loadRecord) => { const indxRecord = await loadRecord(indxIndex) const indx = getStruct(INDX_HEADER, indxRecord) if (indx.magic !== 'INDX') throw new Error('Invalid INDX record') const decoder = getDecoder(indx.encoding) const tagxBuffer = indxRecord.slice(indx.length) const tagx = getStruct(TAGX_HEADER, tagxBuffer) if (tagx.magic !== 'TAGX') throw new Error('Invalid TAGX section') const numTags = (tagx.length - 12) / 4 const tagTable = Array.from({ length: numTags }, (_, i) => new Uint8Array(tagxBuffer.slice(12 + i * 4, 12 + i * 4 + 4))) const cncx = {} let cncxRecordOffset = 0 for (let i = 0; i < indx.numCncx; i++) { const record = await loadRecord(indxIndex + indx.numRecords + i + 1) const array = new Uint8Array(record) for (let pos = 0; pos < array.byteLength;) { const index = pos const { value, length } = getVarLen(array, pos) pos += length const result = record.slice(pos, pos + value) pos += value cncx[cncxRecordOffset + index] = decoder.decode(result) } cncxRecordOffset += 0x10000 } const table = [] for (let i = 0; i < indx.numRecords; i++) { const record = await loadRecord(indxIndex + 1 + i) const array = new Uint8Array(record) const indx = getStruct(INDX_HEADER, record) if (indx.magic !== 'INDX') throw new 
Error('Invalid INDX record') for (let j = 0; j < indx.numRecords; j++) { const offsetOffset = indx.idxt + 4 + 2 * j const offset = getUint(record.slice(offsetOffset, offsetOffset + 2)) const length = getUint(record.slice(offset, offset + 1)) const name = getString(record.slice(offset + 1, offset + 1 + length)) const tags = [] const startPos = offset + 1 + length let controlByteIndex = 0 let pos = startPos + tagx.numControlBytes for (const [tag, numValues, mask, end] of tagTable) { if (end & 1) { controlByteIndex++ continue } const offset = startPos + controlByteIndex const value = getUint(record.slice(offset, offset + 1)) & mask if (value === mask) { if (countBitsSet(mask) > 1) { const { value, length } = getVarLen(array, pos) tags.push([tag, null, value, numValues]) pos += length } else tags.push([tag, 1, null, numValues]) } else tags.push([tag, value >> countUnsetEnd(mask), null, numValues]) } const tagMap = {} for (const [tag, valueCount, valueBytes, numValues] of tags) { const values = [] if (valueCount != null) { for (let i = 0; i < valueCount * numValues; i++) { const { value, length } = getVarLen(array, pos) values.push(value) pos += length } } else { let count = 0 while (count < valueBytes) { const { value, length } = getVarLen(array, pos) values.push(value) pos += length count += length } } tagMap[tag] = values } table.push({ name, tagMap }) } } return { table, cncx } } const getNCX = async (indxIndex, loadRecord) => { const { table, cncx } = await getIndexData(indxIndex, loadRecord) const items = table.map(({ tagMap }, index) => ({ index, offset: tagMap[1]?.[0], size: tagMap[2]?.[0], label: cncx[tagMap[3]] ?? '', headingLevel: tagMap[4]?.[0], pos: tagMap[6], parent: tagMap[21]?.[0], firstChild: tagMap[22]?.[0], lastChild: tagMap[23]?.[0], })) const getChildren = item => { if (item.firstChild == null) return item item.children = items.filter(x => x.parent === item.index).map(getChildren) return item } return items.filter(item => item.headingLevel === 0).map(getChildren) } const getEXTH = (buf, encoding) => { const { magic, count } = getStruct(EXTH_HEADER, buf) if (magic !== 'EXTH') throw new Error('Invalid EXTH header') const decoder = getDecoder(encoding) const results = {} let offset = 12 for (let i = 0; i < count; i++) { const type = getUint(buf.slice(offset, offset + 4)) const length = getUint(buf.slice(offset + 4, offset + 8)) if (type in EXTH_RECORD_TYPE) { const [name, typ, many] = EXTH_RECORD_TYPE[type] const data = buf.slice(offset + 8, offset + length) const value = typ === 'uint' ? getUint(data) : decoder.decode(data) if (many) { results[name] ??= [] results[name].push(value) } else results[name] = value } offset += length } return results } const getFont = async (buf, unzlib) => { const { flags, dataStart, keyLength, keyStart } = getStruct(FONT_HEADER, buf) const array = new Uint8Array(buf.slice(dataStart)) // deobfuscate font if (flags & 0b10) { const bytes = keyLength === 16 ? 
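/* the number of obfuscated bytes depends on the key length: 1024 for a 16-byte key, 1040 otherwise */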
1024 : 1040 const key = new Uint8Array(buf.slice(keyStart, keyStart + keyLength)) const length = Math.min(bytes, array.length) for (var i = 0; i < length; i++) array[i] = array[i] ^ key[i % key.length] } // decompress font if (flags & 1) try { return await unzlib(array) } catch (e) { console.warn(e) console.warn('Failed to decompress font') } return array } export const isMOBI = async file => { const magic = getString(await file.slice(60, 68).arrayBuffer()) return magic === 'BOOKMOBI'// || magic === 'TEXtREAd' } class PDB { #file #offsets pdb async open(file) { this.#file = file const pdb = getStruct(PDB_HEADER, await file.slice(0, 78).arrayBuffer()) this.pdb = pdb const buffer = await file.slice(78, 78 + pdb.numRecords * 8).arrayBuffer() // get start and end offsets for each record this.#offsets = Array.from({ length: pdb.numRecords }, (_, i) => getUint(buffer.slice(i * 8, i * 8 + 4))) .map((x, i, a) => [x, a[i + 1]]) } loadRecord(index) { const offsets = this.#offsets[index] if (!offsets) throw new RangeError('Record index out of bounds') return this.#file.slice(...offsets).arrayBuffer() } async loadMagic(index) { const start = this.#offsets[index][0] return getString(await this.#file.slice(start, start + 4).arrayBuffer()) } } export class MOBI extends PDB { #start = 0 #resourceStart #decoder #encoder #decompress #removeTrailingEntries constructor({ unzlib }) { super() this.unzlib = unzlib } async open(file) { await super.open(file) // TODO: if (this.pdb.type === 'TEXt') this.headers = this.#getHeaders(await super.loadRecord(0)) this.#resourceStart = this.headers.mobi.resourceStart let isKF8 = this.headers.mobi.version >= 8 if (!isKF8) { const boundary = this.headers.exth?.boundary if (boundary < 0xffffffff) try { // it's a "combo" MOBI/KF8 file; try to open the KF8 part this.headers = this.#getHeaders(await super.loadRecord(boundary)) this.#start = boundary isKF8 = true } catch (e) { console.warn(e) console.warn('Failed to open KF8; falling back to MOBI') } } await this.#setup() return isKF8 ? new KF8(this).init() : new MOBI6(this).init() } #getHeaders(buf) { const palmdoc = getStruct(PALMDOC_HEADER, buf) const mobi = getStruct(MOBI_HEADER, buf) if (mobi.magic !== 'MOBI') throw new Error('Missing MOBI header') const { titleOffset, titleLength, localeLanguage, localeRegion } = mobi mobi.title = buf.slice(titleOffset, titleOffset + titleLength) const lang = MOBI_LANG[localeLanguage] mobi.language = lang?.[localeRegion >> 2] ?? lang?.[0] const exth = mobi.exthFlag & 0b100_0000 ? getEXTH(buf.slice(mobi.length + 16), mobi.encoding) : null const kf8 = mobi.version >= 8 ? getStruct(KF8_HEADER, buf) : null return { palmdoc, mobi, exth, kf8 } } async #setup() { const { palmdoc, mobi } = this.headers this.#decoder = getDecoder(mobi.encoding) // `TextEncoder` only supports UTF-8 // we are only encoding ASCII anyway, so I think it's fine this.#encoder = new TextEncoder() // set up decompressor const { compression } = palmdoc this.#decompress = compression === 1 ? f => f : compression === 2 ? decompressPalmDOC : compression === 17480 ? 
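/* compression types: 1 = none, 2 = PalmDOC, 17480 (0x4448, 'DH') = HUFF/CDIC */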
await huffcdic(mobi, this.loadRecord.bind(this)) : null if (!this.#decompress) throw new Error('Unknown compression type') // set up function for removing trailing bytes const { trailingFlags } = mobi const multibyte = trailingFlags & 1 const numTrailingEntries = countBitsSet(trailingFlags >>> 1) this.#removeTrailingEntries = array => { for (let i = 0; i < numTrailingEntries; i++) { const length = getVarLenFromEnd(array) array = array.subarray(0, -length) } if (multibyte) { const length = (array[array.length - 1] & 0b11) + 1 array = array.subarray(0, -length) } return array } } decode(...args) { return this.#decoder.decode(...args) } encode(...args) { return this.#encoder.encode(...args) } loadRecord(index) { return super.loadRecord(this.#start + index) } loadMagic(index) { return super.loadMagic(this.#start + index) } loadText(index) { return this.loadRecord(index + 1) .then(buf => new Uint8Array(buf)) .then(this.#removeTrailingEntries) .then(this.#decompress) } async loadResource(index) { const buf = await super.loadRecord(this.#resourceStart + index) const magic = getString(buf.slice(0, 4)) if (magic === 'FONT') return getFont(buf, this.unzlib) if (magic === 'VIDE' || magic === 'AUDI') return buf.slice(12) return buf } getNCX() { const index = this.headers.mobi.indx if (index < 0xffffffff) return getNCX(index, this.loadRecord.bind(this)) } getMetadata() { const { mobi, exth } = this.headers return { identifier: mobi.uid.toString(), title: unescapeHTML(exth?.title || this.decode(mobi.title)), author: exth?.creator?.map(unescapeHTML), publisher: unescapeHTML(exth?.publisher), language: exth?.language ?? mobi.language, published: exth?.date, description: unescapeHTML(exth?.description), subject: exth?.subject?.map(unescapeHTML), rights: unescapeHTML(exth?.rights), contributor: exth?.contributor, } } async getCover() { const { exth } = this.headers const offset = exth?.coverOffset < 0xffffffff ? exth?.coverOffset : exth?.thumbnailOffset < 0xffffffff ? 
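/* fall back to the thumbnail record when no cover offset is present */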
exth?.thumbnailOffset : null if (offset != null) { const buf = await this.loadResource(offset) return new Blob([buf]) } } } const mbpPagebreakRegex = /<\s*(?:mbp:)?pagebreak[^>]*>/gi const fileposRegex = /<[^<>]+filepos=['"]{0,1}(\d+)[^<>]*>/gi const getIndent = el => { let x = 0 while (el) { const parent = el.parentElement if (parent) { const tag = parent.tagName.toLowerCase() if (tag === 'p') x += 1.5 else if (tag === 'blockquote') x += 2 } el = parent } return x } class MOBI6 { parser = new DOMParser() serializer = new XMLSerializer() #resourceCache = new Map() #textCache = new Map() #cache = new Map() #sections #fileposList = [] #type = MIME.HTML constructor(mobi) { this.mobi = mobi } async init() { // load all text records in an array let array = new Uint8Array() for (let i = 0; i < this.mobi.headers.palmdoc.numTextRecords; i++) array = concatTypedArray(array, await this.mobi.loadText(i)) // convert to string so we can use regex // note that `filepos` are byte offsets // so it needs to preserve each byte as a separate character // (see https://stackoverflow.com/q/50198017) const str = Array.from(new Uint8Array(array), c => String.fromCharCode(c)).join('') // split content into sections at each `` this.#sections = [0] .concat(Array.from(str.matchAll(mbpPagebreakRegex), m => m.index)) .map((x, i, a) => str.slice(x, a[i + 1])) // recover the original raw bytes .map(str => Uint8Array.from(str, x => x.charCodeAt(0))) .map(raw => ({ book: this, raw })) // get start and end filepos for each section .reduce((arr, x) => { const last = arr[arr.length - 1] x.start = last?.end ?? 0 x.end = x.start + x.raw.byteLength return arr.concat(x) }, []) this.sections = this.#sections.map((section, index) => ({ id: index, load: () => this.loadSection(section), createDocument: () => this.createDocument(section), size: section.end - section.start, })) try { this.landmarks = await this.getGuide() const tocHref = this.landmarks .find(({ type }) => type?.includes('toc'))?.href if (tocHref) { const { index } = this.resolveHref(tocHref) const doc = await this.sections[index].createDocument() let lastItem let lastLevel = 0 let lastIndent = 0 const lastLevelOfIndent = new Map() const lastParentOfLevel = new Map() this.toc = Array.from(doc.querySelectorAll('a[filepos]')) .reduce((arr, a) => { const indent = getIndent(a) const item = { label: a.innerText?.trim() ?? '', href: `filepos:${a.getAttribute('filepos')}`, } const level = indent > lastIndent ? lastLevel + 1 : indent === lastIndent ? lastLevel : lastLevelOfIndent.get(indent) ?? 
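/* an unrecorded indent falls back to one level shallower than the previous item, floored at 0 */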
                                Math.max(0, lastLevel - 1)
                        if (level > lastLevel) {
                            if (lastItem) {
                                lastItem.subitems ??= []
                                lastItem.subitems.push(item)
                                lastParentOfLevel.set(level, lastItem)
                            } else arr.push(item)
                        } else {
                            const parent = lastParentOfLevel.get(level)
                            if (parent) parent.subitems.push(item)
                            else arr.push(item)
                        }
                        lastItem = item
                        lastLevel = level
                        lastIndent = indent
                        lastLevelOfIndent.set(indent, level)
                        return arr
                    }, [])
            }
        } catch(e) {
            console.warn(e)
        }

        // get list of all `filepos` references in the book,
        // which will be used to insert anchor elements
        // because only then can they be referenced in the DOM
        this.#fileposList = [...new Set(
            Array.from(str.matchAll(fileposRegex), m => m[1]))]
            .map(filepos => ({ filepos, number: Number(filepos) }))
            .sort((a, b) => a.number - b.number)

        this.metadata = this.mobi.getMetadata()
        this.getCover = this.mobi.getCover.bind(this.mobi)
        return this
    }
    async getGuide() {
        const doc = await this.createDocument(this.#sections[0])
        return Array.from(doc.getElementsByTagName('reference'), ref => ({
            label: ref.getAttribute('title'),
            type: ref.getAttribute('type')?.split(/\s/),
            href: `filepos:${ref.getAttribute('filepos')}`,
        }))
    }
    async loadResource(index) {
        if (this.#resourceCache.has(index)) return this.#resourceCache.get(index)
        const raw = await this.mobi.loadResource(index)
        const url = URL.createObjectURL(new Blob([raw]))
        this.#resourceCache.set(index, url)
        return url
    }
    async loadRecindex(recindex) {
        return this.loadResource(Number(recindex) - 1)
    }
    async replaceResources(doc) {
        for (const img of doc.querySelectorAll('img[recindex]')) {
            const recindex = img.getAttribute('recindex')
            try {
                img.src = await this.loadRecindex(recindex)
            } catch {
                console.warn(`Failed to load image ${recindex}`)
            }
        }
        for (const media of doc.querySelectorAll('[mediarecindex]')) {
            const mediarecindex = media.getAttribute('mediarecindex')
            const recindex = media.getAttribute('recindex')
            try {
                media.src = await this.loadRecindex(mediarecindex)
                if (recindex) media.poster = await this.loadRecindex(recindex)
            } catch {
                console.warn(`Failed to load media ${mediarecindex}`)
            }
        }
        for (const a of doc.querySelectorAll('[filepos]')) {
            const filepos = a.getAttribute('filepos')
            a.href = `filepos:${filepos}`
        }
    }
    async loadText(section) {
        if (this.#textCache.has(section)) return this.#textCache.get(section)
        const { raw } = section

        // insert anchor elements for each `filepos`
        const fileposList = this.#fileposList
            .filter(({ number }) => number >= section.start && number < section.end)
            .map(obj => ({ ...obj, offset: obj.number - section.start }))
        let arr = raw
        if (fileposList.length) {
            arr = raw.subarray(0, fileposList[0].offset)
            fileposList.forEach(({ filepos, offset }, i) => {
                const next = fileposList[i + 1]
                const a = this.mobi.encode(`<a id="filepos${filepos}"></a>`)
                arr = concatTypedArray3(arr, a, raw.subarray(offset, next?.offset))
            })
        }
        const str = this.mobi.decode(arr).replaceAll(mbpPagebreakRegex, '')
        this.#textCache.set(section, str)
        return str
    }
    async createDocument(section) {
        const str = await this.loadText(section)
        return this.parser.parseFromString(str, this.#type)
    }
    async loadSection(section) {
        if (this.#cache.has(section)) return this.#cache.get(section)
        const doc = await this.createDocument(section)

        // inject default stylesheet
        const style = doc.createElement('style')
        doc.head.append(style)
        // blockquotes in MOBI seem to have only a small left margin by default
        // many books seem to rely on this, as it's the only way to set margin
        // (since there's no CSS)
        style.append(doc.createTextNode(`blockquote { margin-block-start: 0; margin-block-end:
0; margin-inline-start: 1em; margin-inline-end: 0; }`))

        await this.replaceResources(doc)
        const result = this.serializer.serializeToString(doc)
        const url = URL.createObjectURL(new Blob([result], { type: this.#type }))
        this.#cache.set(section, url)
        return url
    }
    resolveHref(href) {
        const filepos = href.match(/filepos:(.*)/)[1]
        const number = Number(filepos)
        const index = this.#sections.findIndex(section => section.end > number)
        const anchor = doc => doc.getElementById(`filepos${filepos}`)
        return { index, anchor }
    }
    splitTOCHref(href) {
        const filepos = href.match(/filepos:(.*)/)[1]
        const number = Number(filepos)
        const index = this.#sections.findIndex(section => section.end > number)
        return [index, `filepos${filepos}`]
    }
    getTOCFragment(doc, id) {
        return doc.getElementById(id)
    }
    isExternal(uri) {
        return /^(?!blob|filepos)\w+:/i.test(uri)
    }
    destroy() {
        for (const url of this.#resourceCache.values()) URL.revokeObjectURL(url)
        for (const url of this.#cache.values()) URL.revokeObjectURL(url)
    }
}

// handlers for `kindle:` uris
const kindleResourceRegex = /kindle:(flow|embed):(\w+)(?:\?mime=(\w+\/[-+.\w]+))?/
const kindlePosRegex = /kindle:pos:fid:(\w+):off:(\w+)/

const parseResourceURI = str => {
    const [resourceType, id, type] = str.match(kindleResourceRegex).slice(1)
    return { resourceType, id: parseInt(id, 32), type }
}
const parsePosURI = str => {
    const [fid, off] = str.match(kindlePosRegex).slice(1)
    return { fid: parseInt(fid, 32), off: parseInt(off, 32) }
}
const makePosURI = (fid = 0, off = 0) =>
    `kindle:pos:fid:${fid.toString(32).toUpperCase().padStart(4, '0')
    }:off:${off.toString(32).toUpperCase().padStart(10, '0')}`

// `kindle:pos:` links are originally links that contain fragment identifiers
// so there should exist an element with `id` or `name`
// otherwise try to find one with an `aid` attribute
const getFragmentSelector = str => {
    const match = str.match(/\s(id|name|aid)\s*=\s*['"]([^'"]*)['"]/i)
    if (!match) return
    const [, attr, value] = match
    return `[${attr}="${CSS.escape(value)}"]`
}

// replace asynchronously and sequentially
const replaceSeries = async (str, regex, f) => {
    const matches = []
    str.replace(regex, (...args) => (matches.push(args), null))
    const results = []
    for (const args of matches) results.push(await f(...args))
    return str.replace(regex, () => results.shift())
}

const getPageSpread = properties => {
    for (const p of properties) {
        if (p === 'page-spread-left' || p === 'rendition:page-spread-left')
            return 'left'
        if (p === 'page-spread-right' || p === 'rendition:page-spread-right')
            return 'right'
        if (p === 'rendition:page-spread-center') return 'center'
    }
}

class KF8 {
    parser = new DOMParser()
    serializer = new XMLSerializer()
    #cache = new Map()
    #fragmentOffsets = new Map()
    #fragmentSelectors = new Map()
    #tables = {}
    #sections
    #fullRawLength
    #rawHead = new Uint8Array()
    #rawTail = new Uint8Array()
    #lastLoadedHead = -1
    #lastLoadedTail = -1
    #type = MIME.XHTML
    #inlineMap = new Map()
    constructor(mobi) {
        this.mobi = mobi
    }
    async init() {
        const loadRecord = this.mobi.loadRecord.bind(this.mobi)
        const { kf8 } = this.mobi.headers

        try {
            const fdstBuffer = await loadRecord(kf8.fdst)
            const fdst = getStruct(FDST_HEADER, fdstBuffer)
            if (fdst.magic !== 'FDST') throw new Error('Missing FDST record')
            const fdstTable = Array.from({ length: fdst.numEntries },
                (_, i) => 12 + i * 8)
                .map(offset => [
                    getUint(fdstBuffer.slice(offset, offset + 4)),
                    getUint(fdstBuffer.slice(offset + 4, offset + 8))])
            this.#tables.fdstTable = fdstTable
            this.#fullRawLength = fdstTable[fdstTable.length - 1][1]
        } catch
        {}

        const skelTable = (await getIndexData(kf8.skel, loadRecord)).table
            .map(({ name, tagMap }, index) => ({
                index, name,
                numFrag: tagMap[1][0],
                offset: tagMap[6][0],
                length: tagMap[6][1],
            }))

        const fragData = await getIndexData(kf8.frag, loadRecord)
        const fragTable = fragData.table.map(({ name, tagMap }) => ({
            insertOffset: parseInt(name),
            selector: fragData.cncx[tagMap[2][0]],
            index: tagMap[4][0],
            offset: tagMap[6][0],
            length: tagMap[6][1],
        }))

        this.#tables.skelTable = skelTable
        this.#tables.fragTable = fragTable
        this.#sections = skelTable.reduce((arr, skel) => {
            const last = arr[arr.length - 1]
            const fragStart = last?.fragEnd ?? 0, fragEnd = fragStart + skel.numFrag
            const frags = fragTable.slice(fragStart, fragEnd)
            const length = skel.length
                + frags.map(f => f.length).reduce((a, b) => a + b)
            const totalLength = (last?.totalLength ?? 0) + length
            return arr.concat({ skel, frags, fragEnd, length, totalLength })
        }, [])

        const resources = await this.getResourcesByMagic(['RESC', 'PAGE'])
        const pageSpreads = new Map()
        if (resources.RESC) {
            const buf = await this.mobi.loadRecord(resources.RESC)
            const str = this.mobi.decode(buf.slice(16)).replace(/\0/g, '')
            // the RESC record lacks the root `<package>` element
            // but seems to be otherwise valid XML
            const index = str.search(/\?>/)
            const xmlStr = `<package>${str.slice(index)}</package>`
            const opf = this.parser.parseFromString(xmlStr, MIME.XML)
            for (const $itemref of opf.querySelectorAll('spine > itemref')) {
                const i = parseInt($itemref.getAttribute('skelid'))
                pageSpreads.set(i, getPageSpread(
                    $itemref.getAttribute('properties')?.split(' ') ?? []))
            }
        }

        this.sections = this.#sections.map((section, index) => section.frags.length
            ? ({
                id: index,
                load: () => this.loadSection(section),
                createDocument: () => this.createDocument(section),
                size: section.length,
                pageSpread: pageSpreads.get(index),
            }) : ({ linear: 'no' }))

        try {
            const ncx = await this.mobi.getNCX()
            const map = ({ label, pos, children }) => {
                const [fid, off] = pos
                const href = makePosURI(fid, off)
                const arr = this.#fragmentOffsets.get(fid)
                if (arr) arr.push(off)
                else this.#fragmentOffsets.set(fid, [off])
                return { label: unescapeHTML(label), href, subitems: children?.map(map) }
            }
            this.toc = ncx?.map(map)
            this.landmarks = await this.getGuide()
        } catch(e) {
            console.warn(e)
        }

        const { exth } = this.mobi.headers
        this.dir = exth.pageProgressionDirection
        this.rendition = {
            layout: exth.fixedLayout === 'true' ? 'pre-paginated' : 'reflowable',
            viewport: Object.fromEntries(exth.originalResolution
                ?.split('x')?.slice(0, 2)
                ?.map((x, i) => [i ? 'height' : 'width', x]) ?? []),
        }

        this.metadata = this.mobi.getMetadata()
        this.getCover = this.mobi.getCover.bind(this.mobi)
        return this
    }
    // is this really the only way of getting to RESC, PAGE, etc.?
    async getResourcesByMagic(keys) {
        const results = {}
        const start = this.mobi.headers.kf8.resourceStart
        const end = this.mobi.pdb.numRecords
        for (let i = start; i < end; i++) {
            try {
                const magic = await this.mobi.loadMagic(i)
                const match = keys.find(key => key === magic)
                if (match) results[match] = i
            } catch {}
        }
        return results
    }
    async getGuide() {
        const index = this.mobi.headers.kf8.guide
        if (index < 0xffffffff) {
            const loadRecord = this.mobi.loadRecord.bind(this.mobi)
            const { table, cncx } = await getIndexData(index, loadRecord)
            return table.map(({ name, tagMap }) => ({
                label: cncx[tagMap[1][0]] ?? '',
                type: name?.split(/\s/),
                href: makePosURI(tagMap[6]?.[0] ??
                    tagMap[3]?.[0]),
            }))
        }
    }
    async loadResourceBlob(str) {
        const { resourceType, id, type } = parseResourceURI(str)
        const raw = resourceType === 'flow' ? await this.loadFlow(id)
            : await this.mobi.loadResource(id - 1)
        const result = [MIME.XHTML, MIME.HTML, MIME.CSS, MIME.SVG].includes(type)
            ? await this.replaceResources(this.mobi.decode(raw)) : raw
        const doc = type === MIME.SVG
            ? this.parser.parseFromString(result, type) : null
        return [new Blob([result], { type }),
            // SVG wrappers need to be inlined
            // as browsers don't allow external resources when loading SVG as an image
            doc?.getElementsByTagNameNS('http://www.w3.org/2000/svg', 'image')?.length
                ? doc.documentElement : null]
    }
    async loadResource(str) {
        if (this.#cache.has(str)) return this.#cache.get(str)
        const [blob, inline] = await this.loadResourceBlob(str)
        const url = inline ? str : URL.createObjectURL(blob)
        if (inline) this.#inlineMap.set(url, inline)
        this.#cache.set(str, url)
        return url
    }
    replaceResources(str) {
        const regex = new RegExp(kindleResourceRegex, 'g')
        return replaceSeries(str, regex, this.loadResource.bind(this))
    }
    // NOTE: there doesn't seem to be a way to access text randomly?
    // how to know the decompressed size of the records without decompressing?
    // 4096 is just the maximum size
    async loadRaw(start, end) {
        // here we load either from the front or back until we have reached the
        // required offsets; at worst you'd have to load half the book at once
        const distanceHead = end - this.#rawHead.length
        const distanceEnd = this.#fullRawLength == null ? Infinity
            : (this.#fullRawLength - this.#rawTail.length) - start

        // load from the start
        if (distanceHead < 0 || distanceHead < distanceEnd) {
            while (this.#rawHead.length < end) {
                const index = ++this.#lastLoadedHead
                const data = await this.mobi.loadText(index)
                this.#rawHead = concatTypedArray(this.#rawHead, data)
            }
            return this.#rawHead.slice(start, end)
        }

        // load from the end
        while (this.#fullRawLength - this.#rawTail.length > start) {
            const index = this.mobi.headers.palmdoc.numTextRecords - 1
                - (++this.#lastLoadedTail)
            const data = await this.mobi.loadText(index)
            this.#rawTail = concatTypedArray(data, this.#rawTail)
        }
        const rawTailStart = this.#fullRawLength - this.#rawTail.length
        return this.#rawTail.slice(start - rawTailStart, end - rawTailStart)
    }
    loadFlow(index) {
        if (index < 0xffffffff) return this.loadRaw(...this.#tables.fdstTable[index])
    }
    async loadText(section) {
        const { skel, frags, length } = section
        const raw = await this.loadRaw(skel.offset, skel.offset + length)
        let skeleton = raw.slice(0, skel.length)
        for (const frag of frags) {
            const insertOffset = frag.insertOffset - skel.offset
            const offset = skel.length + frag.offset
            const fragRaw = raw.slice(offset, offset + frag.length)
            skeleton = concatTypedArray3(
                skeleton.slice(0, insertOffset), fragRaw, skeleton.slice(insertOffset))

            const offsets = this.#fragmentOffsets.get(frag.index)
            if (offsets) for (const offset of offsets) {
                const str = this.mobi.decode(fragRaw).slice(offset)
                const selector = getFragmentSelector(str)
                this.#setFragmentSelector(frag.index, offset, selector)
            }
        }
        return this.mobi.decode(skeleton)
    }
    async createDocument(section) {
        const str = await this.loadText(section)
        return this.parser.parseFromString(str, this.#type)
    }
    async loadSection(section) {
        if (this.#cache.has(section)) return this.#cache.get(section)
        const str = await this.loadText(section)
        const replaced = await this.replaceResources(str)
        // by default, type is XHTML; change to HTML if it's not valid XHTML
        let doc =
            this.parser.parseFromString(replaced, this.#type)
        if (doc.querySelector('parsererror') || !doc.documentElement?.namespaceURI) {
            this.#type = MIME.HTML
            doc = this.parser.parseFromString(replaced, this.#type)
        }
        for (const [url, node] of this.#inlineMap) {
            for (const el of doc.querySelectorAll(`img[src="${url}"]`))
                el.replaceWith(node)
        }
        const url = URL.createObjectURL(
            new Blob([this.serializer.serializeToString(doc)], { type: this.#type }))
        this.#cache.set(section, url)
        return url
    }
    getIndexByFID(fid) {
        return this.#sections.findIndex(section =>
            section.frags.some(frag => frag.index === fid))
    }
    #setFragmentSelector(id, offset, selector) {
        const map = this.#fragmentSelectors.get(id)
        if (map) map.set(offset, selector)
        else {
            const map = new Map()
            this.#fragmentSelectors.set(id, map)
            map.set(offset, selector)
        }
    }
    async resolveHref(href) {
        const { fid, off } = parsePosURI(href)
        const index = this.getIndexByFID(fid)
        if (index < 0) return
        const saved = this.#fragmentSelectors.get(fid)?.get(off)
        if (saved) return { index, anchor: doc => doc.querySelector(saved) }
        const { skel, frags } = this.#sections[index]
        const frag = frags.find(frag => frag.index === fid)
        const offset = skel.offset + skel.length + frag.offset
        const fragRaw = await this.loadRaw(offset, offset + frag.length)
        const str = this.mobi.decode(fragRaw).slice(off)
        const selector = getFragmentSelector(str)
        this.#setFragmentSelector(fid, off, selector)
        const anchor = doc => doc.querySelector(selector)
        return { index, anchor }
    }
    splitTOCHref(href) {
        const pos = parsePosURI(href)
        const index = this.getIndexByFID(pos.fid)
        return [index, pos]
    }
    getTOCFragment(doc, { fid, off }) {
        const selector = this.#fragmentSelectors.get(fid)?.get(off)
        return doc.querySelector(selector)
    }
    isExternal(uri) {
        return /^(?!blob|kindle)\w+:/i.test(uri)
    }
    destroy() {
        for (const url of this.#cache.values()) URL.revokeObjectURL(url)
    }
}
```
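For reference, the `kindle:pos` URIs produced by `makePosURI` and consumed by `parsePosURI` in `mobi.js` above encode the fragment id and byte offset in base 32, upper-cased and zero-padded to 4 and 10 digits respectively. A minimal standalone sketch of that round trip follows; the `makePos`/`parsePos` names are illustrative stand-ins, not part of the repository:

```js
// Sketch of the kindle:pos URI format handled in mobi.js:
// base-32 digits, upper-cased, zero-padded to 4 (fid) and 10 (off) characters.
const makePos = (fid = 0, off = 0) =>
    `kindle:pos:fid:${fid.toString(32).toUpperCase().padStart(4, '0')}` +
    `:off:${off.toString(32).toUpperCase().padStart(10, '0')}`

const parsePos = str => {
    const [, fid, off] = str.match(/kindle:pos:fid:(\w+):off:(\w+)/)
    return { fid: parseInt(fid, 32), off: parseInt(off, 32) }
}

const uri = makePos(3, 12345)   // "kindle:pos:fid:0003:off:0000000C1P"
console.log(parsePos(uri))      // { fid: 3, off: 12345 }
```

The padding only affects the printed width; `parseInt(..., 32)` ignores leading zeros, so padded and unpadded values decode to the same numbers.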