```
├── .gitattributes
├── .gitignore
├── AI_manager.py
├── AI_professor_UI.py
├── AI_professor_chat.py
├── LICENSE
├── README.md
├── TTS_manager.py
├── assets/
│   ├── ai_avatar.svg
│   ├── down_arrow.svg
│   ├── fold_page.png
│   └── katex/
│       ├── README.md
│       └── contrib/
│           ├── auto-render.js
│           ├── auto-render.min.js
│           ├── auto-render.mjs
│           ├── copy-tex.js
│           ├── copy-tex.min.js
│           ├── copy-tex.mjs
│           ├── mathtex-script-type.js
│           ├── mathtex-script-type.min.js
│           ├── mathtex-script-type.mjs
│           ├── mhchem.js
│           ├── mhchem.min.js
│           └── mhchem.mjs
```
## /.gitattributes
```gitattributes path="/.gitattributes"
# Auto detect text files and perform LF normalization
* text=auto
```
## /.gitignore
```gitignore path="/.gitignore"
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
```
## /AI_manager.py
```py path="/AI_manager.py"
from PyQt6.QtCore import QObject, pyqtSignal, QUuid
from AI_professor_chat import AIProfessorChat
from threads import AIResponseThread
from TTS_manager import TTSManager
from voice_input import VoiceInput
from rag_retriever import RagRetriever
import os
class AIManager(QObject):
"""
AI管理类 - 处理所有AI相关的功能
包括:
- AI对话逻辑
- 语音识别
- TTS语音合成
- RAG检索增强生成
"""
# 信号定义
ai_response_ready = pyqtSignal(str) # AI回复准备好信号
vad_started = pyqtSignal() # 语音活动开始信号
vad_stopped = pyqtSignal() # 语音活动结束信号
voice_text_received = pyqtSignal(str) # 接收到语音文本信号
voice_error = pyqtSignal(str) # 语音错误信号
voice_ready = pyqtSignal() # 语音系统就绪信号
voice_device_switched = pyqtSignal(bool) # 语音设备切换状态信号
ai_sentence_ready = pyqtSignal(str, str) # 单句AI回复准备好信号(内容, 请求ID)
ai_generation_cancelled = pyqtSignal() # AI生成被取消信号
def __init__(self):
"""初始化AI管理器"""
super().__init__()
# 初始化AI聊天助手
self._init_ai_assistant()
# 初始化TTS管理器
self.tts_manager = TTSManager()
# 连接TTS播放开始信号
self.tts_manager.tts_playback_started.connect(self._on_tts_playback_started)
# 连接TTS音频实际播放开始信号
self.tts_manager.tts_audio_playback_started.connect(self._on_tts_audio_playback_started)
# 缓存待显示的句子
self.pending_sentences = {}
# 语音输入对象将在init_voice_recognition中初始化
self.voice_input = None
        self.data_manager = None  # 将在之后通过 set_data_manager 设置
# 添加状态标志来跟踪是否有正在进行的AI生成
self.is_generating_response = False
# 当前活动的请求ID
self.current_request_id = None
# 添加累积响应变量
self.accumulated_response = ""
def set_data_manager(self, data_manager):
"""设置数据管理器引用"""
self.data_manager = data_manager
def _init_ai_assistant(self):
"""初始化AI聊天助手和响应线程"""
self.ai_chat = AIProfessorChat()
self.ai_response_thread = AIResponseThread(self.ai_chat)
self.ai_response_thread.response_ready.connect(self._on_ai_response_ready)
# 连接新的单句信号
self.ai_response_thread.sentence_ready.connect(self._on_ai_sentence_ready)
def init_voice_recognition(self, input_device_index=0):
"""初始化语音识别系统"""
if self.voice_input is not None:
return True # 已经初始化
try:
# 创建语音输入对象
self.voice_input = VoiceInput(input_device_index)
# 连接信号
self.voice_input.text_received.connect(self._on_voice_text_received)
self.voice_input.vad_started.connect(self._on_vad_started)
self.voice_input.vad_stopped.connect(self._on_vad_stopped)
self.voice_input.error_occurred.connect(self._on_voice_error)
self.voice_input.initialization_complete.connect(self._on_voice_init_complete)
# 开始后台初始化
self.voice_input.initialize()
return True
except Exception as e:
print(f"初始化语音识别失败: {str(e)}")
return False
def _on_voice_init_complete(self, success):
"""语音初始化完成回调"""
if success:
self.voice_ready.emit()
else:
self.voice_error.emit("语音系统初始化失败")
def cancel_current_response(self):
"""取消当前正在生成的AI响应"""
print("取消当前的AI响应...")
# 停止TTS播放并清除与当前请求相关的所有待处理TTS
if self.current_request_id:
self.tts_manager.cancel_request_id(self.current_request_id)
else:
self.tts_manager.stop_playing() # 旧版兼容
# 处理已收集的部分响应
# 只有当有实际内容时才添加到历史记录
if self.accumulated_response and self.accumulated_response.strip():
print(f"保存已生成的部分响应到对话历史: {self.accumulated_response[:30]}...")
# 将已生成的部分添加到对话历史
if hasattr(self.ai_chat, 'conversation_history'):
# 添加到对话历史
self.ai_chat.conversation_history.append({
"role": "assistant",
"content": self.accumulated_response
})
# 无论是否添加到历史,都重置累积响应
self.accumulated_response = ""
# 清空待处理的句子
self.pending_sentences.clear()
# 中断AI响应线程
if self.ai_response_thread.isRunning():
print("正在停止AI生成...")
self.ai_response_thread.requestInterruption()
self.ai_response_thread.wait(1000) # 等待最多1秒
# 发出取消信号,以便UI清理loading bubble
self.is_generating_response = False
self.ai_generation_cancelled.emit()
# 清除当前请求ID
self.current_request_id = None
def is_busy(self):
"""检查是否有AI响应正在生成或TTS正在播放"""
return self.is_generating_response or not self.tts_manager.is_queue_empty()
def get_ai_response(self, query, paper_id=None, visible_content=None):
"""获取AI对用户查询的响应"""
try:
# 如果已经有正在生成的响应,先取消它
if self.is_generating_response:
self.cancel_current_response()
# 确保线程不在运行状态
if self.ai_response_thread.isRunning():
print("等待上一个AI响应线程结束...")
self.ai_response_thread.requestInterruption()
self.ai_response_thread.wait(1000) # 等待最多1秒
# 如果线程仍在运行,创建新的线程
if self.ai_response_thread.isRunning():
print("创建新的AI响应线程...")
self._init_ai_assistant()
# 生成新的请求ID
request_id = str(QUuid.createUuid().toString(QUuid.StringFormat.Id128))
self.current_request_id = request_id
print(f"创建新的AI请求,ID: {request_id}")
# 确保有论文上下文(如果必要)
if not paper_id and self.data_manager and self.data_manager.current_paper:
paper_id = self.data_manager.current_paper.get('id')
# 获取论文数据并设置上下文
if paper_id and self.data_manager:
paper_data = self.data_manager.load_rag_tree(paper_id)
if paper_data:
self.ai_chat.set_paper_context(paper_id, paper_data)
# 设置请求参数并启动线程
self.ai_response_thread.set_request(query, paper_id, visible_content)
# 更新状态标志
self.is_generating_response = True
# 启动线程
self.ai_response_thread.start()
# 返回请求ID,以便调用者可以使用
return request_id
except Exception as e:
print(f"AI响应生成失败: {str(e)}")
self.is_generating_response = False
self.current_request_id = None
self.ai_response_ready.emit(f"抱歉,处理您的问题时出现错误: {str(e)}")
return None
def _on_ai_response_ready(self, response):
"""处理AI响应就绪事件"""
# 更新状态标志
self.is_generating_response = False
# 发出信号通知UI
self.ai_response_ready.emit(response)
# 不再重复调用TTS - 只有在非流式响应时才使用TTS
if not self.ai_response_thread.use_streaming:
self._speak_response(response)
def _on_ai_sentence_ready(self, sentence, emotion, scroll_info=None):
"""处理单句AI响应就绪事件"""
# 如果没有当前请求ID,可能是已经被取消,忽略这个句子
if not self.current_request_id:
return
# 缓存句子,并关联请求ID和情绪
sentence_id = id(sentence) # 使用对象id作为唯一标识
self.pending_sentences[sentence_id] = (sentence, self.current_request_id, emotion)
# 累积响应
self.accumulated_response += sentence
# 删除此行,不在AI生成时触发显示
# self.ai_sentence_ready.emit(sentence, self.current_request_id)
# 处理滚动信息 - 如果有滚动信息且markdown_view被设置,则执行滚动
if scroll_info and hasattr(self, 'markdown_view') and self.markdown_view:
self._scroll_to_content(scroll_info)
# 使用TTS朗读单句 - 传递从AI生成的实际情绪
self._speak_response(sentence, sentence_id, emotion)
def _speak_response(self, text, sentence_id=None, emotion="neutral"):
"""使用TTS朗读文本"""
# 确保有当前请求ID
if not self.current_request_id:
return
# 为文本添加标识,用于在TTS开始播放时匹配回来
if sentence_id:
# 保存文本、请求ID和情绪的映射关系,请求TTS时传递请求ID和情绪
self.tts_manager.request_tts(text, self.current_request_id, emotion)
# 存储映射关系(句子ID与句子内容+请求ID+情绪)
self.pending_sentences[sentence_id] = (text, self.current_request_id, emotion)
else:
# 对于非流式响应,直接传递请求ID和情绪
self.tts_manager.request_tts(text, self.current_request_id, emotion)
def _on_tts_playback_started(self, text, request_id):
"""当TTS加入播放队列时调用(不再触发消息显示)"""
# 如果请求ID不匹配当前活动请求,忽略这个播放事件
if request_id != self.current_request_id:
print(f"忽略过时的TTS播放:{text[:20]}... (请求ID: {request_id})")
return
# 可以在此处添加进度指示等逻辑,但不再触发消息显示
def _on_tts_audio_playback_started(self, text, request_id):
"""当TTS音频实际开始播放时调用(触发消息显示)"""
# 如果请求ID不匹配当前活动请求,忽略这个播放事件
if request_id != self.current_request_id:
print(f"忽略过时的TTS音频播放:{text[:20]}... (请求ID: {request_id})")
return
# 查找匹配的句子
for sentence_id, (sentence, stored_request_id, _) in list(self.pending_sentences.items()):
if sentence == text and stored_request_id == request_id:
# 发出显示此句子的信号,附带请求ID
self.ai_sentence_ready.emit(sentence, request_id)
# 从待处理列表中移除
self.pending_sentences.pop(sentence_id, None)
break
# 语音识别相关方法
def toggle_voice_detection(self, active):
"""切换语音检测状态"""
if not self.voice_input:
return False
        if active:
return self.voice_input.start_listening()
else:
return self.voice_input.stop_listening()
def get_voice_devices(self):
"""获取可用的语音输入设备"""
return VoiceInput.get_input_devices()
def switch_voice_device(self, device_index):
"""切换语音输入设备"""
if not self.voice_input:
print("语音输入系统未初始化")
self.voice_device_switched.emit(False)
return False
# 开始切换设备并返回结果
success = self.voice_input.switch_device(device_index)
# 这里不发送信号,由voice_input的initialization_complete信号触发
# 但需要确保正确连接这个信号到设备切换完成处理
# 确保初始化完成信号连接到正确的处理方法
if success:
# 断开可能存在的旧连接,避免重复连接
try:
self.voice_input.initialization_complete.disconnect(self._on_device_switch_complete)
            except TypeError:
pass # 如果没有连接,忽略错误
# 添加新连接,将初始化完成信号连接到设备切换完成处理
self.voice_input.initialization_complete.connect(self._on_device_switch_complete)
return success
def _on_device_switch_complete(self, success):
"""处理设备切换完成事件"""
# 转发信号到UI
self.voice_device_switched.emit(success)
# 断开特定连接,避免混淆普通初始化和设备切换的初始化完成信号
try:
self.voice_input.initialization_complete.disconnect(self._on_device_switch_complete)
        except TypeError:
pass
# 如果切换成功,也需要触发voice_ready信号
if success:
self.voice_ready.emit()
# 语音回调转发方法
def _on_voice_text_received(self, text):
self.voice_text_received.emit(text)
def _on_vad_started(self):
self.vad_started.emit()
def _on_vad_stopped(self):
self.vad_stopped.emit()
def _on_voice_error(self, error_message):
self.voice_error.emit(error_message)
def cleanup(self):
"""清理所有资源"""
# 停止TTS
if hasattr(self, 'tts_manager'):
self.tts_manager.stop()
# 停止语音识别
if self.voice_input:
self.voice_input.cleanup()
# 停止AI响应线程
if self.ai_response_thread and self.ai_response_thread.isRunning():
self.ai_response_thread.requestInterruption()
self.ai_response_thread.wait()
def init_rag_retriever(self, base_path):
"""在后台初始化RAG检索器"""
try:
print(f"[INFO] 开始初始化RAG检索器: {base_path}")
# 创建RAG检索器并开始后台加载
self.retriever = RagRetriever(base_path)
# 确保AI聊天模块使用相同的检索器
if hasattr(self, 'ai_chat') and self.ai_chat:
if self.ai_chat.retriever is not None:
print("[INFO] 替换AI聊天模块中的旧检索器")
self.ai_chat.retriever = self.retriever
# 连接加载完成信号以进行日志记录
self.retriever.loading_complete.connect(self._on_retriever_loaded)
return True
except Exception as e:
print(f"[ERROR] 初始化RAG检索器失败: {str(e)}")
return False
def _on_retriever_loaded(self, success):
"""处理检索器加载完成事件"""
if success:
print(f"[INFO] RAG检索器加载完成,共加载了 {len(self.retriever.paper_vector_paths)} 篇论文的向量库索引")
# 可以添加额外验证代码
for paper_id, path in self.retriever.paper_vector_paths.items():
if not os.path.exists(path):
print(f"[WARNING] 论文 {paper_id} 的向量库路径不存在: {path}")
else:
print("[ERROR] RAG检索器加载失败或没有找到论文")
def add_paper_vector_store(self, paper_id, vector_store_path):
"""添加新论文的向量库
在处理完新论文后调用此方法
Args:
paper_id: 论文ID
vector_store_path: 向量库路径
Returns:
bool: 成功返回True
"""
if hasattr(self, 'retriever'):
return self.retriever.add_paper(paper_id, vector_store_path)
return False
def _scroll_to_content(self, scroll_info):
"""根据滚动信息滚动到对应内容"""
if not scroll_info:
return
# 获取当前语言
current_lang = self.markdown_view.get_current_language()
# 根据当前语言选择内容
content = scroll_info['zh_content'] if current_lang == 'zh' else scroll_info['en_content']
node_type = scroll_info.get('node_type', 'text')
is_title = scroll_info.get('is_title', False)
# 如果内容为空,尝试使用另一种语言的内容
if not content:
content = scroll_info['en_content'] if current_lang == 'zh' else scroll_info['zh_content']
# 执行滚动
if content:
# 根据节点类型确定滚动类型
if is_title:
self.markdown_view._scroll_to_matching_content(content, 'title')
else:
self.markdown_view._scroll_to_matching_content(content, 'text')
```
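补充说明:`AIManager` 对外主要通过信号与 `get_ai_response` 工作。下面是一个最小使用示意(非项目文件,假定依赖已安装并运行在Qt事件循环中,回调内容仅为演示):
```py
from AI_manager import AIManager

ai_manager = AIManager()

# TTS音频实际开始播放某句话时才发出 ai_sentence_ready,由UI显示该句
ai_manager.ai_sentence_ready.connect(lambda sentence, request_id: print("显示:", sentence))
# 生成被取消时,UI可借此清理"加载中"的气泡
ai_manager.ai_generation_cancelled.connect(lambda: print("本次回答已取消"))

# 发起一次问答:返回 request_id,后续信号都会携带它,便于丢弃过时回复
request_id = ai_manager.get_ai_response("这篇论文的核心贡献是什么?")

# 语音输入:先初始化设备,再开启监听,识别到的文本转交给问答
ai_manager.init_voice_recognition(input_device_index=0)
ai_manager.voice_text_received.connect(lambda text: ai_manager.get_ai_response(text))
ai_manager.toggle_voice_detection(True)
```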
## /AI_professor_UI.py
```py path="/AI_professor_UI.py"
import os
from PyQt6.QtWidgets import (QMainWindow, QWidget, QVBoxLayout,
QHBoxLayout, QPushButton, QSplitter,
QLabel, QFrame)
from PyQt6.QtCore import Qt
from PyQt6.QtGui import QFont
from ui.markdown_view import MarkdownView
from ui.chat_widget import ChatWidget
from ui.sidebar_widget import SidebarWidget
from data_manager import DataManager
from AI_manager import AIManager
class AIProfessorUI(QMainWindow):
"""
主窗口类 - 学术论文AI助手的主界面
负责创建和管理整个应用的UI布局、样式和交互逻辑,
包括侧边栏、文档查看区和AI聊天区
"""
def __init__(self):
"""初始化主窗口及所有子组件"""
super().__init__()
# 初始化数据管理器和AI管理器
self.data_manager = DataManager()
self.ai_manager = AIManager()
# 设置两者互相引用
self.ai_manager.set_data_manager(self.data_manager)
self.data_manager.set_ai_manager(self.ai_manager)
# 设置UI元素
self.init_window_properties()
self.init_custom_titlebar()
self.init_ui_components()
# 连接数据管理器信号
self.connect_signals()
# 加载论文数据
self.data_manager.load_papers_index()
# 显示欢迎信息
self.show_welcome_message()
# 在后台预加载所有论文向量库
self.ai_manager.init_rag_retriever("output")
def init_window_properties(self):
"""初始化窗口属性:大小、图标、状态栏和窗口风格"""
# 设置窗口标题和初始大小
self.setWindowTitle("暴躁的教授读论文")
self.setGeometry(100, 100, 1400, 900)
# 添加状态栏
self.statusBar().showMessage("就绪")
self.statusBar().setStyleSheet("""
QStatusBar {
background-color: #303F9F;
color: white;
padding: 2px;
font-size: 11px;
}
""")
# 设置无边框窗口,但允许调整大小
self.setWindowFlags(Qt.WindowType.FramelessWindowHint |
Qt.WindowType.WindowMaximizeButtonHint |
Qt.WindowType.WindowMinimizeButtonHint |
Qt.WindowType.WindowCloseButtonHint)
# 设置窗口样式
self.setStyleSheet("""
QMainWindow {
background-color: #E8EAF6;
}
""")
def init_custom_titlebar(self):
"""
初始化自定义标题栏
创建一个美观的自定义标题栏,包含应用图标、标题和窗口控制按钮,
并实现拖拽移动和双击最大化的功能
"""
# 创建标题栏框架
self.titlebar = QFrame(self)
self.titlebar.setObjectName("customTitleBar")
self.titlebar.setFixedHeight(30)
self.titlebar.setStyleSheet("""
#customTitleBar {
background: qlineargradient(x1:0, y1:0, x2:1, y2:0,
stop:0 #0D47A1, stop:0.5 #1A237E, stop:1 #0D47A1);
color: white;
}
""")
# 设置布局
titlebar_layout = QHBoxLayout(self.titlebar)
titlebar_layout.setContentsMargins(10, 0, 10, 0)
titlebar_layout.setSpacing(5)
# 设置应用图标
app_icon = QLabel()
# 使用应用程序图标渲染到标题栏
app_icon.setPixmap(self.windowIcon().pixmap(16, 16))
# 设置应用标题
app_title = QLabel("暴躁的教授读论文")
app_title.setStyleSheet("color: white; font-weight: bold;")
# 创建窗口控制按钮
self.create_window_control_buttons()
# 添加组件到布局
titlebar_layout.addWidget(app_icon)
titlebar_layout.addWidget(app_title)
titlebar_layout.addStretch(1)
titlebar_layout.addWidget(self.btn_minimize)
titlebar_layout.addWidget(self.btn_maximize)
titlebar_layout.addWidget(self.btn_close)
# 绑定拖动和双击事件
self.titlebar.mousePressEvent = self.titlebar_mousePressEvent
self.titlebar.mouseMoveEvent = self.titlebar_mouseMoveEvent
self.titlebar.mouseDoubleClickEvent = self.titlebar_doubleClickEvent
# 将标题栏添加到主窗口
self.layout().setMenuBar(self.titlebar)
def create_window_control_buttons(self):
"""创建窗口控制按钮:最小化、最大化和关闭"""
# 通用按钮样式
btn_style = """
QPushButton {
background-color: transparent;
color: white;
border: none;
font-family: Arial;
font-weight: bold;
font-size: 14px;
padding: 4px 8px;
border-radius: 4px;
}
QPushButton:hover {
background-color: rgba(255, 255, 255, 0.2);
}
"""
# 最小化按钮
self.btn_minimize = QPushButton("🗕")
self.btn_minimize.setStyleSheet(btn_style)
self.btn_minimize.clicked.connect(self.showMinimized)
self.btn_minimize.setToolTip("最小化")
self.btn_minimize.setCursor(Qt.CursorShape.PointingHandCursor)
# 最大化/还原按钮
self.btn_maximize = QPushButton("🗖")
self.btn_maximize.setStyleSheet(btn_style)
self.btn_maximize.clicked.connect(self.toggle_maximize)
self.btn_maximize.setToolTip("最大化")
self.btn_maximize.setCursor(Qt.CursorShape.PointingHandCursor)
# 关闭按钮
self.btn_close = QPushButton("✕")
self.btn_close.setStyleSheet("""
QPushButton {
background-color: transparent;
color: white;
border: none;
font-family: Arial;
font-weight: bold;
font-size: 14px;
padding: 4px 8px;
border-radius: 4px;
}
QPushButton:hover {
background-color: #E81123;
border-radius: 4px;
}
""")
self.btn_close.clicked.connect(self.close)
self.btn_close.setToolTip("关闭")
self.btn_close.setCursor(Qt.CursorShape.PointingHandCursor)
def titlebar_mousePressEvent(self, event):
"""处理标题栏的鼠标按下事件,用于实现窗口拖动"""
if event.button() == Qt.MouseButton.LeftButton:
self.dragPos = event.globalPosition().toPoint()
event.accept()
def titlebar_mouseMoveEvent(self, event):
"""处理标题栏的鼠标移动事件,实现窗口拖动"""
if event.buttons() == Qt.MouseButton.LeftButton:
if hasattr(self, 'dragPos'):
self.move(self.pos() + event.globalPosition().toPoint() - self.dragPos)
self.dragPos = event.globalPosition().toPoint()
event.accept()
def titlebar_doubleClickEvent(self, event):
"""处理标题栏的双击事件,切换窗口最大化状态"""
self.toggle_maximize()
def toggle_maximize(self):
"""切换窗口最大化/还原状态"""
if self.isMaximized():
self.showNormal()
self.btn_maximize.setText("🗖")
self.btn_maximize.setToolTip("最大化")
else:
self.showMaximized()
self.btn_maximize.setText("🗗")
self.btn_maximize.setToolTip("还原")
def init_ui_components(self):
"""
初始化UI组件和布局
创建应用的主要UI组件,包括:
- 侧边栏:用于显示和选择论文
- 文档查看区:显示论文内容,支持中英文切换
- 聊天区域:用于与AI助手交互
"""
# 设置中心部件和主布局
central_widget = QWidget()
central_widget.setObjectName("centralWidget")
self.setCentralWidget(central_widget)
main_layout = QHBoxLayout(central_widget)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.setSpacing(0)
# 初始化侧边栏
self.sidebar = SidebarWidget()
# 初始化主内容区域
content_container = self.create_content_container()
# 添加到主布局
main_layout.addWidget(self.sidebar)
main_layout.addWidget(content_container)
# 应用全局样式
self.apply_global_styles()
def create_content_container(self):
"""创建主内容区域容器,包含文档查看区和聊天区域"""
# 主内容区域容器
content_container = QWidget()
content_container.setObjectName("contentContainer")
content_container.setStyleSheet("""
#contentContainer {
background-color: #E8EAF6;
}
""")
content_layout = QVBoxLayout(content_container)
content_layout.setContentsMargins(10, 10, 10, 10)
# 内容区域
content_widget = QWidget()
content_widget.setObjectName("contentWidget")
content_widget.setStyleSheet("""
#contentWidget {
background-color: #E8EAF6;
border: 1px solid rgba(0,0,0,0.1);
}
""")
content_inner_layout = QHBoxLayout(content_widget)
content_inner_layout.setContentsMargins(0, 0, 0, 0)
# 创建分隔器和内容区域组件
splitter = self.create_content_splitter()
content_inner_layout.addWidget(splitter)
content_layout.addWidget(content_widget)
return content_container
def create_content_splitter(self):
"""创建内容区域分隔器,用于调整文档和聊天区域的比例"""
# 分隔器,用于调整文档和聊天的宽度比例
splitter = QSplitter(Qt.Orientation.Horizontal)
splitter.setHandleWidth(1) # 设置分隔条宽度
splitter.setStyleSheet("""
QSplitter::handle {
background-color: #C5CAE9;
}
""")
# 创建Markdown显示区域
md_container = self.create_markdown_container()
# 创建聊天区域
self.chat_widget = ChatWidget()
self.chat_widget.set_paper_controller(self.data_manager)
self.chat_widget.set_ai_controller(self.ai_manager)
self.chat_widget.set_markdown_view(self.md_view)
# 添加到分隔器并设置初始比例
splitter.addWidget(md_container)
splitter.addWidget(self.chat_widget)
splitter.setSizes([int(self.width() * 0.6), int(self.width() * 0.4)])
return splitter
def create_markdown_container(self):
"""创建Markdown文档显示区域"""
# Markdown显示区域容器
md_container = QWidget()
md_container.setObjectName("mdContainer")
md_layout = QVBoxLayout(md_container)
md_layout.setContentsMargins(0, 0, 0, 0)
md_layout.setSpacing(0)
# 创建文档工具栏
toolbar = self.create_doc_toolbar()
# 创建Markdown视图容器
md_view_container = QFrame()
md_view_container.setObjectName("mdViewContainer")
md_view_container.setStyleSheet("""
#mdViewContainer {
background-color: #FFFFFF;
border-bottom-left-radius: 10px;
border-bottom-right-radius: 10px;
border-left: 1px solid #CFD8DC;
border-right: 1px solid #CFD8DC;
border-bottom: 1px solid #CFD8DC;
}
""")
md_view_layout = QVBoxLayout(md_view_container)
md_view_layout.setContentsMargins(5, 5, 5, 10)
# 创建Markdown视图并传入数据管理器
self.md_view = MarkdownView()
self.md_view.set_data_manager(self.data_manager) # 设置数据管理器
self.md_view.setStyleSheet("background-color: #FFFFFF;")
md_view_layout.addWidget(self.md_view)
# 添加到布局
md_layout.addWidget(toolbar)
md_layout.addWidget(md_view_container)
return md_container
def create_doc_toolbar(self):
"""创建文档工具栏,包含标题和语言切换按钮"""
# 工具栏容器
toolbar = QFrame()
toolbar.setObjectName("docToolbar")
toolbar.setFixedHeight(40)
toolbar.setStyleSheet("""
#docToolbar {
background: qlineargradient(x1:0, y1:0, x2:1, y2:0,
stop:0 #303F9F, stop:1 #1A237E);
border-top-left-radius: 10px;
border-top-right-radius: 10px;
color: white;
}
""")
# 工具栏布局
toolbar_layout = QHBoxLayout(toolbar)
toolbar_layout.setContentsMargins(15, 0, 15, 0)
# 工具栏标题
title_font = QFont("Source Han Sans SC", 11, QFont.Weight.Bold)
doc_title = QLabel("论文阅读")
doc_title.setFont(title_font)
doc_title.setStyleSheet("color: white; font-weight: bold;")
# 语言切换按钮
self.lang_button = QPushButton("切换为英文")
self.lang_button.setObjectName("langButton")
self.lang_button.setStyleSheet("""
#langButton {
background-color: rgba(255, 255, 255, 0.2);
color: white;
border: 1px solid rgba(255, 255, 255, 0.3);
border-radius: 8px;
padding: 5px 15px;
font-weight: bold;
}
#langButton:hover {
background-color: rgba(255, 255, 255, 0.3);
}
""")
self.lang_button.setCursor(Qt.CursorShape.PointingHandCursor)
self.lang_button.clicked.connect(self.toggle_language)
# 添加到布局
toolbar_layout.addWidget(doc_title, 0, Qt.AlignmentFlag.AlignLeft)
toolbar_layout.addWidget(self.lang_button, 0, Qt.AlignmentFlag.AlignRight)
return toolbar
def apply_global_styles(self):
"""应用全局样式,主要用于统一滚动条风格"""
self.setStyleSheet("""
QMainWindow {
background-color: #E8EAF6;
}
QScrollBar:vertical {
border: none;
background: #F5F5F5;
width: 8px;
border-radius: 4px;
}
QScrollBar::handle:vertical {
background: #C5CAE9;
min-height: 20px;
border-radius: 4px;
}
QScrollBar::handle:vertical:hover {
background: #7986CB;
}
QScrollBar::add-line:vertical, QScrollBar::sub-line:vertical {
height: 0px;
}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
background: none;
}
""")
def connect_signals(self):
"""连接数据管理器和UI组件的信号和槽"""
# 连接侧边栏上传信号
self.sidebar.upload_file.connect(self.data_manager.upload_file)
self.sidebar.pause_processing.connect(self.data_manager.pause_processing)
self.sidebar.resume_processing.connect(self.data_manager.resume_processing)
        # 连接数据管理器的论文数据信号
self.data_manager.papers_loaded.connect(self.on_papers_loaded) # 这是关键连接
self.data_manager.paper_content_loaded.connect(self.on_paper_content_loaded)
self.data_manager.loading_error.connect(self.on_loading_error)
self.data_manager.message.connect(self.on_message)
# 连接侧边栏的论文选择信号
self.sidebar.paper_selected.connect(self.on_paper_selected)
# 连接处理进度信号
self.data_manager.processing_progress.connect(self.on_processing_progress)
self.data_manager.processing_finished.connect(self.on_processing_finished)
self.data_manager.processing_error.connect(self.on_processing_error)
self.data_manager.queue_updated.connect(self.on_queue_updated)
# 初始化处理系统
self.data_manager.initialize_processing_system()
def on_papers_loaded(self, papers):
"""
处理论文列表加载完成的信号
Args:
papers: 论文数据列表
"""
self.sidebar.load_papers(papers)
def on_paper_selected(self, paper_id):
"""
处理论文选择事件
当用户在侧边栏选择一篇论文时,通知数据管理器加载相应内容
Args:
paper_id: 选择的论文ID
"""
# 通知数据管理器加载选定的论文
self.data_manager.load_paper_content(paper_id)
def on_paper_content_loaded(self, paper, zh_content, en_content):
"""
处理论文内容加载完成的信号
Args:
paper: 论文数据字典
zh_content: 中文内容
en_content: 英文内容
"""
# 加载文档内容到Markdown视图
self.md_view.load_markdown(zh_content, "zh", render=False) # 不立即渲染
self.md_view.load_markdown(en_content, "en", render=False) # 不立即渲染
self.md_view.set_language("zh") # 默认显示中文
# 更新语言按钮文本
self.lang_button.setText("切换为英文")
self.lang_button.setStyleSheet("""
#langButton {
background-color: rgba(255, 255, 255, 0.2);
color: white;
border: 1px solid rgba(255, 255, 255, 0.3);
border-radius: 8px;
padding: 5px 15px;
font-weight: bold;
}
#langButton:hover {
background-color: rgba(255, 255, 255, 0.3);
}
""")
# 更新状态栏
title = paper.get('translated_title', '') or paper.get('title', '')
self.statusBar().showMessage(f"已加载论文: {title}")
# 向AI助手发送论文加载通知
self.chat_widget.receive_ai_message(f"已加载论文「{title}」")
def on_loading_error(self, error_message):
"""
处理加载错误的信号
Args:
error_message: 错误信息
"""
# 更新状态栏显示错误
self.statusBar().showMessage(f"错误: {error_message}")
# 也可以在这里添加更明显的错误提示,如弹窗等
def on_message(self, message):
"""
处理一般消息的信号
Args:
message: 消息内容
"""
# 更新状态栏
self.statusBar().showMessage(message)
def show_welcome_message(self):
"""显示欢迎信息"""
welcome_md = """
# 哼!又来一个不读论文的学生是吧?
很好,至少你知道打开这个软件。我是你的论文指导教授,**不要期望我对你手下留情**。
## 听好了,这是你能做的事:
- **选论文**:左边那一堆,挑一篇你能看懂的(如果有的话)
- **换语言**:中英文看不懂?按上面那个按钮切换,别指望换了语言就能理解内容
- **问问题**:有不懂的就右边提问,我会回答,虽然你的问题可能很蠢
- **看摘要**:懒得读全文?我给你总结重点,省得你到处抓瞎
## 开始用吧,别磨蹭!
从左边随便选一篇,然后开始读。有不明白的就问我,**别憋着装懂**!
记住:_真正的学术是刀尖起舞,而不是像你平时那样浅尝辄止!_
...不过别担心,我会一直在这陪你读完的。
"""
self.md_view.load_markdown(welcome_md)
def toggle_language(self):
"""
切换文档语言
在中文和英文之间切换文档显示语言,并更新按钮状态和样式
"""
lang = self.md_view.toggle_language()
# 设置按钮文本和样式
if lang == "zh":
btn_text = "切换为英文"
self.lang_button.setStyleSheet("""
#langButton {
background-color: rgba(255, 255, 255, 0.2);
color: white;
border: 1px solid rgba(255, 255, 255, 0.3);
border-radius: 8px;
padding: 5px 15px;
font-weight: bold;
}
#langButton:hover {
background-color: rgba(255, 255, 255, 0.3);
}
""")
else:
btn_text = "切换为中文"
self.lang_button.setStyleSheet("""
#langButton {
background-color: rgba(65, 105, 225, 0.3);
color: white;
border: 1px solid rgba(255, 255, 255, 0.3);
border-radius: 8px;
padding: 5px 15px;
font-weight: bold;
}
#langButton:hover {
background-color: rgba(65, 105, 225, 0.4);
}
""")
self.lang_button.setText(btn_text)
# 更新状态栏
current_paper = self.data_manager.current_paper
if current_paper:
language_text = "英文" if lang == "en" else "中文"
title = current_paper.get('title' if lang == "en" else 'translated_title', '')
self.statusBar().showMessage(f"已切换到{language_text}版本: {title}")
def on_processing_progress(self, file_name, stage, progress, remaining):
self.sidebar.update_upload_status(file_name, stage, progress, remaining)
def on_processing_finished(self, paper_id):
self.data_manager.load_papers_index()
def on_processing_error(self, paper_id, error_msg):
self.statusBar().showMessage(f"处理论文出错: {error_msg}")
def on_queue_updated(self, queue):
"""处理队列更新回调"""
# 获取待处理文件数量
pending_count = len(queue)
# 更新状态栏显示
if pending_count > 0:
self.statusBar().showMessage(f"队列中有 {pending_count} 个文件待处理")
else:
self.statusBar().showMessage("处理队列为空")
# 更新上传组件UI
if pending_count == 0:
# 队列空时更新UI为完成状态
self.sidebar.update_upload_status("", "全部完成", 100, 0)
elif not self.data_manager.is_processing and pending_count > 0:
# 有待处理文件但当前没在处理时,显示下一个要处理的文件
next_item = queue[0]
self.sidebar.update_upload_status(
os.path.basename(next_item['path']),
"等待处理",
0,
pending_count
)
def closeEvent(self, event):
"""处理窗口关闭事件 - 确保所有线程停止"""
# 调用聊天部件的closeEvent
# 清理AI管理器资源
if hasattr(self, 'ai_manager'):
self.ai_manager.cleanup()
if hasattr(self, 'chat_widget'):
# 如果chat_widget中有语音线程,请求中断并清理
if hasattr(self.chat_widget, 'voice_thread') and self.chat_widget.voice_thread:
self.chat_widget.voice_thread.stop() # 使用新增的stop()方法
self.chat_widget.voice_thread.wait(1000) # 等待线程完成,最多1秒
self.chat_widget.closeEvent(event)
# 停止任何正在运行的处理线程
if self.data_manager.current_thread is not None and self.data_manager.current_thread.isRunning():
self.data_manager.current_thread.stop()
self.data_manager.current_thread.wait(1000) # 等待线程完成,最多1秒
# 调用父类的closeEvent
super().closeEvent(event)
```
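补充说明:该主窗口由仓库中的 `main.py` 作为入口启动(见README)。一个等价的最小启动示意如下(仅作说明,实际请直接运行 `main.py`):
```py
import sys
from PyQt6.QtWidgets import QApplication
from AI_professor_UI import AIProfessorUI

if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = AIProfessorUI()
    window.show()
    sys.exit(app.exec())
```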
## /AI_professor_chat.py
```py path="/AI_professor_chat.py"
import logging
import json
import os
from typing import List, Dict, Any, Generator, Tuple
from config import LLMClient
AI_CHARACTER_PROMPT_PATH = "prompt/ai_character_prompt_leidian.txt"
AI_EXPLAIN_PROMPT_PATH = "prompt/ai_explain_prompt.txt"
AI_ROUTER_PROMPT_PATH = "prompt/ai_router_prompt.txt"
class AIProfessorChat:
"""
AI对话助手 - 学术论文智能问答系统
支持多种回答策略:
- 直接回答
- 页面内容分析
- 宏观检索(章节概要)
- RAG检索(精准段落)
"""
def __init__(self):
"""初始化AI对话助手"""
self.logger = logging.getLogger(__name__)
# 设置基础路径
self.base_path = os.path.dirname(os.path.abspath(__file__))
self.output_path = os.path.join(self.base_path, "output")
# 对话历史 (保持最近10条)
self.conversation_history = []
# 当前论文上下文
self.current_paper_id = None
self.current_paper_data = None
# 将实例化改为引用初始化
self.retriever = None # 稍后由AI_manager设置
# LLM客户端
self.llm_client = None
try:
self.llm_client = LLMClient()
self.logger.info("AI对话助手初始化完成")
except Exception as e:
self.logger.error(f"初始化AI对话组件失败: {str(e)}")
def _read_file(self, filepath: str) -> str:
"""读取文件内容"""
try:
with open(filepath, 'r', encoding='utf-8') as f:
return f.read().strip()
except Exception as e:
self.logger.warning(f"读取文件 {filepath} 失败: {str(e)}")
return ""
def set_paper_context(self, paper_id: str, paper_data: Dict[str, Any]) -> bool:
"""设置当前论文上下文
Args:
paper_id: 论文ID
paper_data: 论文数据字典
Returns:
bool: 成功返回True,失败返回False
"""
try:
self.current_paper_id = paper_id
self.current_paper_data = paper_data
self.logger.info(f"已设置论文上下文: {paper_id}")
return True
except Exception as e:
self.logger.error(f"设置论文上下文失败: {str(e)}")
return False
def process_query_stream(self, query: str, visible_content: str = None) -> Generator[Tuple[str, str, Dict], None, None]:
"""流式处理用户查询并生成回答,按句子返回
Args:
query: 用户查询文本
visible_content: 当前可见的页面内容
Yields:
Tuple[str, str, Dict]: (生成的句子, 情绪, 滚动定位信息)
Returns:
Generator: 句子生成器
"""
try:
if not self.llm_client:
yield "AI服务尚未初始化,请稍后再试。", None, None
return
print(f"\n==== 用户查询 ====\n{query}")
# 1. 检查是否需要添加用户问题到对话历史
should_add_query = True
if self.conversation_history and len(self.conversation_history) > 0:
last_message = self.conversation_history[-1]
if last_message["role"] == "user" and last_message["content"] == query:
# 问题已存在于历史记录的最后一条,不需要重复添加
should_add_query = False
self.logger.info("检测到重复问题,跳过添加到历史记录")
# 只有在需要时才添加问题到对话历史
if should_add_query:
self.conversation_history.append({"role": "user", "content": query})
# 保持对话历史在合理长度
if len(self.conversation_history) > 10:
self.conversation_history = self.conversation_history[-10:]
# 2. 决策过程 - 调用LLM进行决策
decision = self._make_decision(query)
self.logger.info(f"决策结果: {decision}")
print(f"\n==== 决策结果 ====\n{json.dumps(decision, ensure_ascii=False, indent=2)}")
# 3. 根据决策选择策略
emotion = decision.get('emotion', 'neutral')
function_name = decision.get('function', 'direct_answer')
optimized_query = decision.get('query', query) # 获取优化后的查询
# 4. 根据策略执行不同的处理
context_info = ""
scroll_info = None # 初始化滚动信息
if function_name == 'direct_answer':
# 直接回答,不需要额外信息
print("\n==== 直接回答模式 ====\n无需检索上下文")
pass
elif function_name == 'page_content_analysis':
# 分析当前页面内容
if visible_content:
context_info = f"以下是页面当前显示的内容:\n\n{visible_content}"
print(f"\n==== 页面内容分析 ====\n{context_info}")
elif function_name == 'macro_retrieval':
# 宏观检索 - 获取章节概要
if self.current_paper_data:
context_info = self._get_macro_context(optimized_query) # 使用优化查询
elif function_name == 'rag_retrieval':
# RAG检索 - 获取相关段落
if self.current_paper_id:
context_info, scroll_info = self._get_rag_context(optimized_query) # 使用优化查询
else:
# 未知策略,使用直接回答
self.logger.warning(f"未知的回答策略: {function_name},使用直接回答")
# 5. 准备最终查询消息,传递原始查询、优化查询和回答策略
final_messages = self._prepare_final_messages(
query=query,
context_info=context_info,
emotion=emotion,
optimized_query=optimized_query, # 传递优化后的查询
function_name=function_name # 传递回答策略
)
print(f"\n==== 最终发送给LLM的消息 ====")
for i, msg in enumerate(final_messages):
print(f"消息 {i+1} - 角色: {msg['role']}")
print(f"内容: {msg['content']}\n")
# 6. 调用LLM获取流式回答
response_generator = self.llm_client.chat_stream_by_sentence(
messages=final_messages,
temperature=0.7
)
# 7. 收集完整响应以添加到历史记录
full_response = ""
# 8. 流式返回结果,第一个句子附带滚动信息
first_sentence = True
for sentence in response_generator:
full_response += sentence
if first_sentence:
yield sentence, emotion, scroll_info # 添加情绪参数
first_sentence = False
else:
yield sentence, emotion, None # 添加情绪参数
# 9. 记录AI回答到对话历史
self.conversation_history.append({"role": "assistant", "content": full_response})
print(f"\n==== LLM完整响应 ====\n{full_response}")
except Exception as e:
error_msg = f"流式处理查询失败: {str(e)}"
self.logger.error(error_msg)
yield f"抱歉,处理您的问题时出现错误: {str(e)}", None, None
def record_assistant_response(self, response):
"""记录AI助手的回应到对话历史
Args:
response: AI生成的回答
"""
# 记录AI回答到对话历史
self.conversation_history.append({"role": "assistant", "content": response})
def _validate_decision(self, decision_data: Dict[str, str]) -> bool:
"""验证决策结果是否符合要求
Args:
decision_data: 决策数据字典
Returns:
bool: 验证通过返回True,否则返回False
"""
# 检查必要字段
required_fields = ["emotion", "function", "query"]
if not all(field in decision_data for field in required_fields):
self.logger.warning("决策数据缺少必要字段")
return False
# 确保emotion在有效范围内
valid_emotions = ["happy", "sad", "angry", "fearful", "disgusted", "surprised", "neutral"]
if decision_data["emotion"] not in valid_emotions:
self.logger.warning(f"无效的情绪类型: {decision_data['emotion']}")
return False
# 确保function在有效范围内
valid_functions = ["direct_answer", "page_content_analysis", "macro_retrieval", "rag_retrieval"]
if decision_data["function"] not in valid_functions:
self.logger.warning(f"无效的功能类型: {decision_data['function']}")
return False
return True
def _make_decision(self, query: str) -> Dict[str, str]:
"""决定如何回答用户的问题
Args:
query: 用户查询
Returns:
Dict[str, str]: 包含emotion, function, query的决策字典
"""
# 默认决策结果
default_decision = {
"emotion": "neutral",
"function": "direct_answer",
"query": query # 默认使用原始查询
}
try:
# 1. 读取并准备决策提示词
router_prompt = self._read_file(AI_ROUTER_PROMPT_PATH)
# 确定当前论文状态
has_paper_loaded = self.current_paper_id is not None and self.current_paper_data is not None
paper_status = "有论文加载" if has_paper_loaded else "无论文加载"
# 获取当前论文标题(如果有)
paper_title = "无论文"
if has_paper_loaded:
paper_title = self.current_paper_data.get('translated_title', '') or self.current_paper_data.get('title', '')
paper_title = f"当前论文标题: {paper_title}"
# 准备对话历史格式 - 不包括最新的用户查询
formatted_history = ""
if len(self.conversation_history) > 1: # 确保有足够的历史记录
# 只取最近的历史记录(不包括最新的用户查询)
recent_history = self.conversation_history[:-1][-4:] # 最多取4条历史记录(不包括最新的)
history_items = []
for msg in recent_history:
role = "用户" if msg["role"] == "user" else "暴躁教授"
content = msg["content"]
history_items.append(f"{role}: {content}")
formatted_history = "\n".join(history_items)
# 将论文状态、论文标题和对话历史添加到提示中
decision_prompt = router_prompt.format(
query=query,
paper_status=paper_status,
paper_title=paper_title,
conversation_history=formatted_history
)
print(f"\n==== 决策提示 ====\n{decision_prompt}")
# 2. 准备调用LLM的消息
messages = [{"role": "user", "content": decision_prompt}]
# 3. 最多尝试两次
import re
decision_data = None
for attempt in range(2):
self.logger.info(f"决策请求尝试 {attempt+1}/2")
# 调用LLM进行决策
decision_response = self.llm_client.chat(
messages=messages,
temperature=0.7,
stream=False
)
print(f"\n==== 决策LLM响应 (尝试 {attempt+1}) ====\n{decision_response}")
# 使用正则表达式匹配JSON结构
json_match = re.search(r'\{.*\}', decision_response, re.DOTALL)
if not json_match:
self.logger.warning("无法从响应中提取JSON,将重试")
continue
try:
# 解析提取的JSON
decision_data = json.loads(json_match.group(0))
# 验证决策数据
if self._validate_decision(decision_data):
# 验证通过,跳出循环
break
else:
self.logger.warning("决策验证失败,将重试")
except json.JSONDecodeError:
self.logger.warning("JSON解析失败,将重试")
# 4. 如果无论文加载,强制使用direct_answer
if not has_paper_loaded and decision_data and self._validate_decision(decision_data):
decision_data["function"] = "direct_answer"
self.logger.info("无论文加载,强制使用direct_answer策略")
# 5. 返回决策结果:如果decision_data有效则使用它,否则使用默认值
if decision_data and self._validate_decision(decision_data):
return {
"emotion": decision_data["emotion"],
"function": decision_data["function"],
"query": decision_data["query"]
}
else:
self.logger.warning("所有决策尝试均失败,使用默认决策")
return default_decision
except Exception as e:
self.logger.error(f"决策过程失败: {str(e)}")
return default_decision
def _get_macro_context(self, query: str) -> str:
"""获取宏观上下文 - 从章节概要中提取
提取内容:
- 论文总标题(翻译或原始)
- 论文总摘要(如果存在)
- 第一级章节的标题和摘要(不递归处理子章节)
Args:
query: 检索查询
Returns:
str: 宏观上下文信息
"""
try:
if not self.current_paper_data:
return ""
# 提取章节标题和摘要
context_parts = []
# 添加文档标题
doc_title = self.current_paper_data.get('translated_title', '') or self.current_paper_data.get('title', '')
if doc_title:
context_parts.append(f"# {doc_title}")
# 添加论文总摘要(如果存在)
if 'summary' in self.current_paper_data and self.current_paper_data['summary']:
context_parts.append(f"## 总摘要\n{self.current_paper_data['summary']}")
# 添加第一级章节标题和摘要(不递归)
if 'sections' in self.current_paper_data and self.current_paper_data['sections']:
context_parts.append("## 章节概要")
for section in self.current_paper_data['sections']:
# 提取章节标题(优先使用翻译标题)
section_title = section.get('translated_title', '') or section.get('title', '')
# 提取章节摘要
section_summary = section.get('summary', '')
if section_title:
# 添加章节标题和摘要
section_text = f"### {section_title}"
if section_summary:
section_text += f"\n{section_summary}"
context_parts.append(section_text)
# 组合所有上下文
if context_parts:
context_result = "\n\n".join(context_parts)
print(f"\n==== 宏观检索结果 ====\n{context_result}")
return context_result
else:
print("\n==== 宏观检索结果为空 ====")
return ""
except Exception as e:
self.logger.error(f"获取宏观上下文失败: {str(e)}")
return ""
def _get_rag_context(self, query: str) -> Tuple[str, Dict]:
"""从RAG检索器获取相关上下文和滚动定位信息"""
try:
if not self.current_paper_id or not query:
return "", None
print(f"\n==== RAG检索查询 ====\n{query}")
# 添加检查 - 确保检索器存在且已加载完成
if not self.retriever:
self.logger.warning("RAG检索器未初始化,无法执行检索")
return "", None
# 检查检索器是否就绪
if not self.retriever.is_ready():
self.logger.warning("RAG检索器尚未加载完成,无法执行检索")
return "", None
# 使用RAG检索器获取结构化相关内容和滚动信息
context, scroll_info = self.retriever.retrieve_with_context(
query=query,
paper_id=self.current_paper_id,
top_k=5
)
print(f"\n==== RAG检索结果 ====\n{context}")
return context, scroll_info
except Exception as e:
self.logger.error(f"RAG检索失败: {str(e)}")
return "", None
def _prepare_final_messages(self, query: str, context_info: str, emotion: str, optimized_query: str = None, function_name: str = None) -> List[Dict[str, str]]:
"""准备最终发送给LLM的消息列表
Args:
query: 原始用户查询
context_info: 上下文信息
emotion: 情绪类型
optimized_query: 优化后的查询
function_name: 回答策略
Returns:
List[Dict[str, str]]: 消息列表
"""
messages = []
# 读取角色提示词和解释提示词
character_prompt = self._read_file(AI_CHARACTER_PROMPT_PATH)
explain_prompt = self._read_file(AI_EXPLAIN_PROMPT_PATH)
# 添加论文标题到系统提示(如果有)
title = ""
if self.current_paper_data:
title = self.current_paper_data.get('translated_title', '') or self.current_paper_data.get('title', '')
else:
title = "无论文"
explain_prompt = explain_prompt.format(title=title)
# 系统提示 - 使用回车拼接提示词
system_message = f"{character_prompt}\n{explain_prompt}"
messages.append({"role": "system", "content": system_message})
# 添加对话历史(不包括最新的用户查询)
if len(self.conversation_history) > 1:
messages.extend(self.conversation_history[:-1])
# 构建用户查询 - 包含原始查询和优化查询
final_query = f"当前用户消息:{query}\n\n你的回答情绪应该是:{emotion}"
# 如果有上下文信息,根据function_name添加对应的信息类型说明
if context_info:
context_type = "参考信息"
if function_name == "page_content_analysis":
context_type = "当前页面内容"
elif function_name == "macro_retrieval":
context_type = "论文概要"
elif function_name == "rag_retrieval":
context_type = "相关论文段落"
final_query = f"{final_query}\n\n{context_type}:\n{context_info}"
        final_query += "\n\n输出回复的话:"
# 添加最终用户查询
messages.append({"role": "user", "content": final_query})
return messages
```
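补充说明:`process_query_stream` 以句子为粒度产出 `(句子, 情绪, 滚动信息)` 三元组,其中只有第一句可能携带滚动信息。一个最小消费示意如下(非项目文件,假定 `config.py` 中的API已正确配置):
```py
from AI_professor_chat import AIProfessorChat

chat = AIProfessorChat()

# 未加载论文时,路由决策会被强制为 direct_answer
for sentence, emotion, scroll_info in chat.process_query_stream("用一句话解释什么是RAG?"):
    # scroll_info 仅可能出现在第一句,用于让界面滚动到相关段落
    print(f"[{emotion}] {sentence}")
```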
## /LICENSE
``` path="/LICENSE"
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [Unitec Media Company Limited]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
## /README.md
# 暴躁的教授读论文(mad-professor)
一个Python应用程序,特色是具有暴躁个性的AI教授,让学术论文阅读更加高效有趣。
## 项目概述
"暴躁教授读论文"是一个学术论文阅读伴侣应用程序,旨在通过富有个性的AI助手提高论文阅读效率。它集成了PDF处理、AI翻译、RAG检索、AI问答和语音交互等多种功能,为学术研究者提供一站式的论文阅读解决方案。

## 主要特性
- **论文自动处理**:导入PDF后自动提取、翻译和结构化论文内容
- **双语显示**:支持中英文对照阅读论文
- **AI智能问答**:与论文内容结合,提供专业的解释和分析
- **个性化AI教授**:AI以"暴躁教授"的个性回答问题,增加趣味性
- **语音交互**:支持语音提问和TTS语音回答
- **RAG增强检索**:基于论文内容的精准检索和定位
- **分屏界面**:左侧论文内容,右侧AI问答,高效交互
## 技术架构
- **前端界面**:PyQt6构建的现代化桌面应用
- **核心引擎**:
- AI问答模块:基于LLM的学术问答系统
- RAG检索系统:向量检索增强的问答精准度
- 论文处理管线:PDF转MD、自动翻译、结构化解析
- **交互系统**:
- 语音识别:实时语音输入识别
- TTS语音合成:AI回答实时播报
- 情感识别:根据问题内容调整回答情绪
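上述模块在 `AI_professor_UI.py` 的主窗口初始化中被组装到一起。下面给出一个去掉UI部分的最小示意(基于源码中的实际调用,省略了Qt事件循环和界面组件,仅用于说明模块之间的引用关系):
```
from data_manager import DataManager
from AI_manager import AIManager

# 数据管理器负责论文索引与内容,AI管理器负责问答/语音/TTS/RAG
data_manager = DataManager()
ai_manager = AIManager()

# 两者互相引用
ai_manager.set_data_manager(data_manager)
data_manager.set_ai_manager(ai_manager)

# 在后台预加载 output/ 目录下所有论文的向量库,供RAG检索使用
ai_manager.init_rag_retriever("output")
```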
## 安装指南
### 环境要求
- Python 3.10或更高版本
- CUDA支持
- 6GB 以上显存
### 项目依赖
本项目依赖以下开源项目
- MinerU https://github.com/opendatalab/MinerU
- RealtimeSTT https://github.com/KoljaB/RealtimeSTT
本项目依赖以下在线API服务(可以通过修改代码改为本地实现)
- DeepSeek https://api-docs.deepseek.com
- MiniMax https://platform.minimaxi.com/document/Voice%20Cloning?key=66719032a427f0c8a570165b
### 安装步骤
1. 使用conda创建环境
```
conda create -n mad-professor python=3.10.16
conda activate mad-professor
```
2. 安装MinerU依赖
```
pip install -U magic-pdf[full]==1.3.3 -i https://mirrors.aliyun.com/pypi/simple
```
3. 安装剩余依赖
```
pip install -r requirements.txt
```
4. 安装与电脑显卡版本匹配的CUDA和torch,要求numpy<=2.1.1。示例(具体版本请按电脑配置修改,目前支持CUDA 11.8/12.4/12.6):
```
pip install --force-reinstall torch torchvision torchaudio "numpy<=2.1.1" --index-url https://download.pytorch.org/whl/cu124
```
如果出现报错,请根据MinerU和RealtimeSTT开源项目中的CUDA依赖说明,安装与之匹配的torch和torchaudio版本
5. 安装FAISS的gpu版本 (注:faiss-gpu版本只能通过conda安装,无法通过pip安装)
```
conda install -c conda-forge faiss-gpu
```
6. 模型下载
```
python download_models.py
```
python脚本会自动下载模型文件,并在配置文件中写好模型目录。配置文件位于【用户目录】下,文件名为magic-pdf.json。
Windows的【用户目录】为 "C:\Users\用户名",Linux的【用户目录】为 "/home/用户名"。
修改【用户目录】下配置文件magic-pdf.json中"device-mode"的值来启用CUDA:
```
{
"device-mode":"cuda"
}
```
语音输入的Whisper模型会在运行时自动下载
7. API密钥配置
项目依赖LLM和TTS在线API服务
通过修改`config.py`中的对应字段配置请求路径和密钥
```
API_BASE_URL = "YOUR_API_URL"
API_KEY = "YOUR_API_KEY"
```
按照DeepSeek官方文档配置 https://api-docs.deepseek.com
```
TTS_GROUP_ID = "YOUR_MINIMAX_GROUP_ID"
TTS_API_KEY = "YOUR_MINIMAX_API_KEY"
```
按照MiniMax官方文档配置 https://platform.minimaxi.com/document/Voice%20Cloning?key=66719032a427f0c8a570165b
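如果想把在线API替换为本地实现,可以参考这些字段在代码中的实际使用位置。下面是一段摘录式示意(其中 LLMClient 内部读取 API_BASE_URL / API_KEY 属于合理推测,具体以 `config.py` 为准):
```
from config import TTS_GROUP_ID, TTS_API_KEY, LLMClient

# TTS_manager.py:用 GroupId 拼接 MiniMax 请求地址,用 API Key 做 Bearer 鉴权
url = "https://api.minimax.chat/v1/t2a_v2?GroupId=" + TTS_GROUP_ID
headers = {"Content-Type": "application/json", "Authorization": "Bearer " + TTS_API_KEY}

# AI_professor_chat.py:通过 config 中的 LLMClient 发起对话请求
llm_client = LLMClient()
```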
## 使用说明
### 教授人设/声音修改
目前人设和声音的修改只能通过手动修改代码实现
1. 人设prompt修改
在`prompt`文件夹中创建一个新的`ai_character_prompt_[你的人设名字].txt`
将`AI_professor_chat.py`程序开头`AI_CHARACTER_PROMPT_PATH`字段修改为相应的人设prompt路径
```
AI_CHARACTER_PROMPT_PATH = "prompt/ai_character_prompt_[你的人设名字].txt"
```
当前已有两个人设`ai_character_prompt_keli.txt`和`ai_character_prompt_leidian.txt`,可以作为示例
2. 声音修改
按照MiniMax官方文档新建voice id,或使用现有voice id。官方文档:https://platform.minimaxi.com/document/Voice%20Cloning?key=66719032a427f0c8a570165b
修改`TTS_manager.py`中`TTSManager`类的`build_tts_stream_body`请求方法里对应的voice_id参数
```
body = json.dumps({
"model": "speech-02-turbo",
"text": text,
"stream": True,
"voice_setting": {
"voice_id": "将这个参数修改为你想要使用的voice id",
"speed": 1,
"vol": 1,
"pitch": 0,
"emotion": mapped_emotion
},
"audio_setting": {
"sample_rate": 32000,
"bitrate": 128000,
"format": "pcm",
"channel": 1
}
})
```
### 启动应用
运行`main.py`:
```
python main.py
```
### 导入论文
1. 点击侧边栏的"导入论文"按钮
2. 选择PDF文件导入
3. 点击“继续”,等待处理完成(包括翻译和索引构建)
4. 导入的PDF会存放到data文件夹中,也可以将多篇PDF放入data文件夹,程序会检测未处理的文件批量处理

### 论文阅读
1. 在侧边栏选择已经处理好的论文

2. 在主窗口查看论文内容,右上角可切换中英文

3. 左右侧可折叠隐藏,提供沉浸式阅读体验

### AI问答与语音对话
1. 在对话窗口下方选择语音输入设备

2. 点击麦克风按钮,等指示灯变绿时开始对话
3. 如果说话时指示灯没有变黄,可能说明输入设备无法检测到人声,建议切换其他输入设备进行尝试
## 项目结构
```
mad-professor/
├── Core modules
│   ├── AI_manager.py            # AI feature manager, integrates all AI-related functionality
│   ├── AI_professor_chat.py     # AI dialogue logic, implements the grumpy professor's interactive answers
│   ├── AI_professor_UI.py       # Main UI implementation, the application's UI entry point
│   ├── data_manager.py          # Data manager, handles paper indexing and content loading
│   ├── pipeline.py              # Processing pipeline, coordinates the processors' workflow
│   ├── rag_retriever.py         # RAG retrieval system, implements vector search and context extraction
│   ├── TTS_manager.py           # TTS manager, handles speech synthesis and playback
│   ├── voice_input.py           # Voice input handling, real-time speech recognition
│   └── threads.py               # Thread management, handles asynchronous tasks and concurrency
│
├── UI components (ui/)
│   ├── chat_widget.py           # Chat interface widget
│   ├── markdown_view.py         # Markdown rendering and display widget
│   ├── message_bubble.py        # Message bubble widget
│   ├── sidebar_widget.py        # Sidebar widget (paper list and upload)
│   └── upload_widget.py         # File upload widget
│
├── Processor modules (processor/)
│   ├── pdf_processor.py         # PDF processor, extracts PDF content into Markdown
│   ├── md_processor.py          # Markdown processor, parses Markdown into a structured form
│   ├── json_processor.py        # JSON processor, handles the structured data
│   ├── tiling_processor.py      # Tiling processor, splits content into chunks
│   ├── translate_processor.py   # Translation processor, Chinese-English translation
│   ├── md_restore_processor.py  # Markdown restore processor
│   ├── extra_info_processor.py  # Extra-info processor, generates summaries and questions
│   └── rag_processor.py         # RAG processor, builds the vector store and retrieval tree
│
├── Prompt templates (prompt/)
│   ├── ai_character_prompt_keli.txt     # Keli professor persona prompt
│   ├── ai_character_prompt_leidian.txt  # Leidian professor persona prompt
│   ├── ai_explain_prompt.txt            # Explanation prompt
│   ├── ai_router_prompt.txt             # Routing decision prompt
│   ├── content_translate_prompt.txt     # Content translation prompt
│   ├── formula_analysis_prompt.txt      # Formula analysis prompt
│   └── summary_generation_prompt.txt    # Summary generation prompt
│
├── Resources and configuration
│   ├── config.py                # Configuration file, API keys and model settings
│   ├── paths.py                 # Path management, centralizes file paths
│   ├── main.py                  # Program entry point
│   ├── download_models.py       # Model download script
│   ├── assets/                  # Asset directory (images, styles, etc.)
│   └── font/                    # Font directory
│
└── Data directories
    ├── data/                    # Source data directory (paper PDFs)
    └── output/                  # Output directory (processing results)
```
## Known Issues
1. The project currently only works with PDFs that follow an academic-paper structure; other document types may fail or raise errors
2. If the microphone button is activated before the audio input devices have finished loading and the input device is then switched, the switch may fail; activate the microphone button only after the audio devices are fully loaded
3. When the voice conversation plays through speakers, the AI professor's voice may be picked up again as user input; use headphones to avoid this audio feedback
## License
This project is released under the Apache License - see the LICENSE file for details
## Acknowledgments
Special thanks to the MinerU and RealtimeSTT projects
## /TTS_manager.py
```py path="/TTS_manager.py"
import json
import queue
import pyaudio
import requests
from PyQt6.QtCore import QThread, QObject, pyqtSignal, QMutex, QTimer
from config import TTS_GROUP_ID, TTS_API_KEY
url = "https://api.minimax.chat/v1/t2a_v2?GroupId=" + TTS_GROUP_ID
headers = {"Content-Type": "application/json", "Authorization": "Bearer " + TTS_API_KEY}
class TTSThread(QThread):
    """TTS playback thread; plays audio data taken from a queue."""
    # Signal emitted when a chunk actually starts playing
    audio_playback_started = pyqtSignal(bytes, object)  # audio data plus metadata

    def __init__(self, audio_config):
        super().__init__()
        self.audio_config = audio_config
        self.audio_queue = queue.Queue()
        self.is_running = True
        self.mutex = QMutex()
        self.full_audio = b""

    def run(self):
        """Thread main loop: play audio data from the queue."""
        p = pyaudio.PyAudio()
        stream = p.open(
            format=self.audio_config['format'],
            channels=self.audio_config['channels'],
            rate=self.audio_config['rate'],
            output=True
        )
        try:
            while self.is_running:
                try:
                    # Fetch audio data and metadata from the queue
                    data = self.audio_queue.get(timeout=0.1)
                    if not data:
                        continue
                    # Unpack data and metadata
                    if isinstance(data, tuple) and len(data) == 2:
                        audio_data, metadata = data
                    else:
                        audio_data, metadata = data, None
                    # Signal that playback has actually started
                    if audio_data:
                        self.audio_playback_started.emit(audio_data, metadata)
                    # Play the audio data
                    stream.write(audio_data)
                except queue.Empty:
                    continue
                except Exception as e:
                    print(f"[Playback error] {str(e)}")
        finally:
            stream.stop_stream()
            stream.close()
            p.terminate()

    def stop(self):
        """Stop the thread."""
        self.is_running = False
        self.wait()

    # add_audio accepts optional metadata alongside the audio bytes
    def add_audio(self, audio_data, metadata=None):
        """Add audio data to the playback queue."""
        self.audio_queue.put((audio_data, metadata))
        self.full_audio += audio_data
    def clear_queue(self):
        """Clear the audio queue."""
        try:
            while not self.audio_queue.empty():
                self.audio_queue.get_nowait()
        except queue.Empty:
            pass

    def is_queue_empty(self):
        """Check whether the queue is empty."""
        return self.audio_queue.empty()

    def cancel_request_id(self, request_id):
        """Drop all pending audio that belongs to the given request ID."""
        if not request_id:
            return
        # Build a new queue and filter out matching items
        new_queue = queue.Queue()
        cancelled_count = 0
        # Lock to avoid concurrent modification
        self.mutex.lock()
        try:
            # Inspect each item in the queue
            while not self.audio_queue.empty():
                try:
                    item = self.audio_queue.get_nowait()
                    if not item:
                        continue
                    # Check the request ID carried in the metadata
                    if isinstance(item, tuple) and len(item) == 2:
                        audio_data, metadata = item
                        # Metadata is a tuple whose second element is the request ID
                        if isinstance(metadata, tuple) and len(metadata) >= 2 and metadata[1] == request_id:
                            cancelled_count += 1
                            continue
                    # Keep items that do not match
                    new_queue.put(item)
                except queue.Empty:
                    break
            # Swap in the filtered queue
            self.audio_queue = new_queue
        finally:
            self.mutex.unlock()
        if cancelled_count > 0:
            print(f"Removed {cancelled_count} stale audio chunk(s) from the playback queue")
class TTSManager(QObject):
    # Signal carrying the request ID when a request has been queued for playback: (text, request_id)
    tts_playback_started = pyqtSignal(str, str)
    # Signal emitted when the audio actually starts playing: (text, request_id)
    tts_audio_playback_started = pyqtSignal(str, str)

    def __init__(self):
        super().__init__()
        # Audio configuration; must match the settings sent in the API request
        self.audio_config = {
            'channels': 1,
            'rate': 32000,             # matches the API sample_rate
            'format': pyaudio.paInt16  # 16-bit PCM
        }
        # Create and start the playback thread
        self.player_thread = TTSThread(self.audio_config)
        self.player_thread.start()
        # Forward the actual-playback-started signal
        self.player_thread.audio_playback_started.connect(self._on_audio_playback_started)
        # Whether a request is currently in flight
        self.is_requesting = False
        # Request queue holding text, request ID and emotion
        self.request_queue = []  # [(text, request_id, emotion), ...]
        self.is_processing = False
        # ID of the request currently being processed
        self.current_processing_id = None

    def is_queue_empty(self) -> bool:
        """
        Check whether any audio is still queued for playback.

        Returns:
            bool: True if the queue is empty (nothing playing or waiting), False otherwise.
        """
        return self.player_thread.is_queue_empty() and len(self.request_queue) == 0

    def build_tts_stream_headers(self) -> dict:
        """Build the request headers."""
        headers = {
            'accept': 'application/json, text/plain, */*',
            'content-type': 'application/json',
            'authorization': "Bearer " + TTS_API_KEY,
        }
        return headers

    def build_tts_stream_body(self, text: str, emotion: str = "neutral") -> dict:
        """Build the request body."""
        # Map the simplified emotion labels to the emotions supported by MiniMax
        emotion_mapping = {
            "happy": "happy",
            "sad": "sad",
            "angry": "angry",
            "fearful": "fearful",
            "disgusted": "disgusted",
            "surprised": "surprised",
            "neutral": "neutral"
        }
        # Fall back to "neutral" if the emotion is unknown
        mapped_emotion = emotion_mapping.get(emotion, "neutral")
        body = json.dumps({
            "model": "speech-02-turbo",
            "text": text,
            "stream": True,
            "voice_setting": {
                "voice_id": "leidianjiangjun",
                "speed": 1,
                "vol": 1,
                "pitch": 0,
                "emotion": mapped_emotion
            },
            "audio_setting": {
                "sample_rate": 32000,
                "bitrate": 128000,
                "format": "pcm",
                "channel": 1
            }
        })
        return body
    def request_tts(self, text: str, request_id: str = None, emotion: str = "neutral"):
        """
        Issue a TTS request.

        Args:
            text: Text to synthesize.
            request_id: Identifier used to track this particular request.
            emotion: Emotion label used to adjust the speaking style.
        """
        if not text or not text.strip():
            return
        # If no request ID was provided, fall back to a special marker
        if request_id is None:
            request_id = "default_request"
        # Queue the request together with its text, request ID and emotion
        self.request_queue.append((text, request_id, emotion))
        print(f"Queued TTS request: '{text[:20]}...' (request ID: {request_id}, emotion: {emotion})")
        # Start processing if nothing is currently being processed
        if not self.is_processing:
            self._process_next_request()

    def _on_audio_playback_started(self, audio_data, metadata):
        """Handle the moment audio actually starts playing."""
        if metadata and isinstance(metadata, tuple) and len(metadata) == 2:
            text, request_id = metadata
            # Re-emit as the public playback-started signal
            self.tts_audio_playback_started.emit(text, request_id)

    def _process_next_request(self):
        """Process the next TTS request in the queue."""
        if not self.request_queue:
            self.is_processing = False
            self.current_processing_id = None
            return
        # Mark that we are processing
        self.is_processing = True
        # Unpack the request data
        if len(self.request_queue[0]) == 3:
            text, request_id, emotion = self.request_queue.pop(0)
        else:
            # Backwards compatibility with the old (text, request_id) format
            text, request_id = self.request_queue.pop(0)
            emotion = "neutral"
        self.current_processing_id = request_id
        print(f"Processing TTS request: '{text[:20]}...' (request ID: {request_id}, emotion: {emotion})")
        # Build and send the TTS request
        tts_headers = self.build_tts_stream_headers()
        tts_body = self.build_tts_stream_body(text, emotion)  # pass the emotion through
        try:
            response = requests.request("POST", url, stream=True, headers=tts_headers, data=tts_body)
            # Collect all audio chunks as they arrive
            audio_chunks = []
            for chunk in response.raw:
                if chunk and chunk[:5] == b'data:':
                    data = json.loads(chunk[5:])
                    if "data" in data and "extra_info" not in data:
                        if "audio" in data["data"]:
                            audio_hex = data["data"]['audio']
                            if audio_hex and audio_hex != '\n':
                                audio_data = bytes.fromhex(audio_hex)
                                audio_chunks.append(audio_data)
            # Join the chunks into one buffer
            full_chunk = b"".join(audio_chunks)
            # Hand the audio, with metadata, to the playback queue
            self.player_thread.add_audio(full_chunk, (text, request_id))
            # Emit the queued-for-playback signal (kept for UI progress/status display)
            self.tts_playback_started.emit(text, request_id)
            # Schedule the next request
            QTimer.singleShot(100, self._process_next_request)
        except Exception as e:
            print(f"[TTS request error] {str(e)}")
            self.is_processing = False
            self.current_processing_id = None
            # Keep draining the queue even after an error
            QTimer.singleShot(500, self._process_next_request)

    def stop_playing(self):
        """Stop current playback and clear all queues."""
        self.request_queue = []
        self.is_processing = False
        self.current_processing_id = None
        self.player_thread.clear_queue()
        print("Stopped all TTS playback and pending requests")
    def cancel_request_id(self, request_id: str):
        """
        Cancel all TTS requests with the given request ID.
        """
        print(f"Cancelling all TTS requests with request ID {request_id}")
        # Filter the pending request queue
        self.request_queue = [(text, rid, emotion) for text, rid, emotion in self.request_queue if rid != request_id]
        # If the request currently being processed is the one being cancelled, stop processing it
        if self.current_processing_id == request_id:
            self.is_processing = False
            self.current_processing_id = None
        # Also drop stale audio from the playback queue
        self.player_thread.cancel_request_id(request_id)
        # Resume processing if other requests remain
        if not self.is_processing and self.request_queue:
            QTimer.singleShot(100, self._process_next_request)

    def stop(self):
        """Stop playback and release resources."""
        self.player_thread.stop()

    def get_audio(self) -> bytes:
        """Return the full audio collected so far."""
        return self.player_thread.full_audio
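
# --- Illustrative usage sketch (commented out; an assumption for illustration,
# not part of the original module; requires a running Qt event loop and valid
# MiniMax credentials in config.py) ---
# manager = TTSManager()
# manager.request_tts("Hello from the mad professor", request_id="demo-1", emotion="happy")
# ... later, when shutting down:
# manager.stop_playing()
# manager.stop()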
```
## /assets/
Binary file available at https://raw.githubusercontent.com/LYiHub/mad-professor-public/refs/heads/main/assets/
## /assets/ai_avatar.svg
```svg path="/assets/ai_avatar.svg"
```
## /assets/down_arrow.svg
```svg path="/assets/down_arrow.svg"
```
## /assets/fold_page.png
Binary file available at https://raw.githubusercontent.com/LYiHub/mad-professor-public/refs/heads/main/assets/fold_page.png
## /assets/katex/
Binary file available at https://raw.githubusercontent.com/LYiHub/mad-professor-public/refs/heads/main/assets/katex/
## /assets/katex/README.md
KaTeX is a fast, easy-to-use JavaScript library for TeX math rendering on the web.
* **Fast:** KaTeX renders its math synchronously and doesn't need to reflow the page. See how it compares to a competitor in [this speed test](https://www.intmath.com/cg5/katex-mathjax-comparison.php).
* **Print quality:** KaTeX's layout is based on Donald Knuth's TeX, the gold standard for math typesetting.
* **Self contained:** KaTeX has no dependencies and can easily be bundled with your website resources.
* **Server side rendering:** KaTeX produces the same output regardless of browser or environment, so you can pre-render expressions using Node.js and send them as plain HTML.
KaTeX is compatible with all major browsers, including Chrome, Safari, Firefox, Opera, Edge, and IE 11.
KaTeX supports much (but not all) of LaTeX and many LaTeX packages. See the [list of supported functions](https://katex.org/docs/supported.html).
Try out KaTeX [on the demo page](https://katex.org/#demo)!
## Getting started
### Starter template
```html
...
```
You can also [download KaTeX](https://github.com/KaTeX/KaTeX/releases) and host it yourself.
For details on how to configure auto-render extension, refer to [the documentation](https://katex.org/docs/autorender.html).
### API
Call `katex.render` to render a TeX expression directly into a DOM element.
For example:
```js
katex.render("c = \\pm\\sqrt{a^2 + b^2}", element, {
throwOnError: false
});
```
Call `katex.renderToString` to generate an HTML string of the rendered math,
e.g., for server-side rendering. For example:
```js
var html = katex.renderToString("c = \\pm\\sqrt{a^2 + b^2}", {
throwOnError: false
});
// '...'
```
Make sure to include the CSS and font files in both cases.
If you are doing all rendering on the server, there is no need to include the
JavaScript on the client.
The examples above use the `throwOnError: false` option, which renders invalid
inputs as the TeX source code in red (by default), with the error message as
hover text. For other available options, see the
[API documentation](https://katex.org/docs/api.html),
[options documentation](https://katex.org/docs/options.html), and
[handling errors documentation](https://katex.org/docs/error.html).
## Demo and Documentation
Learn more about using KaTeX [on the website](https://katex.org)!
## Contributors
### Code Contributors
This project exists thanks to all the people who contribute code. If you'd like to help, see [our guide to contributing code](CONTRIBUTING.md).
### Financial Contributors
Become a financial contributor and help us sustain our community.
#### Individuals
#### Organizations
Support this project with your organization. Your logo will show up here with a link to your website.
## License
KaTeX is licensed under the [MIT License](https://opensource.org/licenses/MIT).
## /assets/katex/contrib/
Binary file available at https://raw.githubusercontent.com/LYiHub/mad-professor-public/refs/heads/main/assets/katex/contrib/
## /assets/katex/contrib/auto-render.js
```js path="/assets/katex/contrib/auto-render.js"
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory(require("katex"));
else if(typeof define === 'function' && define.amd)
define(["katex"], factory);
else if(typeof exports === 'object')
exports["renderMathInElement"] = factory(require("katex"));
else
root["renderMathInElement"] = factory(root["katex"]);
})((typeof self !== 'undefined' ? self : this), function(__WEBPACK_EXTERNAL_MODULE__757__) {
return /******/ (function() { // webpackBootstrap
/******/ "use strict";
/******/ var __webpack_modules__ = ({
/***/ 757:
/***/ (function(module) {
module.exports = __WEBPACK_EXTERNAL_MODULE__757__;
/***/ })
/******/ });
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/ // Check if module is in cache
/******/ var cachedModule = __webpack_module_cache__[moduleId];
/******/ if (cachedModule !== undefined) {
/******/ return cachedModule.exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = __webpack_module_cache__[moduleId] = {
/******/ // no module.id needed
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__);
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/************************************************************************/
/******/ /* webpack/runtime/compat get default export */
/******/ !function() {
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function() { return module['default']; } :
/******/ function() { return module; };
/******/ __webpack_require__.d(getter, { a: getter });
/******/ return getter;
/******/ };
/******/ }();
/******/
/******/ /* webpack/runtime/define property getters */
/******/ !function() {
/******/ // define getter functions for harmony exports
/******/ __webpack_require__.d = function(exports, definition) {
/******/ for(var key in definition) {
/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {
/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });
/******/ }
/******/ }
/******/ };
/******/ }();
/******/
/******/ /* webpack/runtime/hasOwnProperty shorthand */
/******/ !function() {
/******/ __webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }
/******/ }();
/******/
/************************************************************************/
var __webpack_exports__ = {};
// EXPORTS
__webpack_require__.d(__webpack_exports__, {
"default": function() { return /* binding */ auto_render; }
});
// EXTERNAL MODULE: external "katex"
var external_katex_ = __webpack_require__(757);
var external_katex_default = /*#__PURE__*/__webpack_require__.n(external_katex_);
;// CONCATENATED MODULE: ./contrib/auto-render/splitAtDelimiters.js
/* eslint no-constant-condition:0 */
const findEndOfMath = function (delimiter, text, startIndex) {
// Adapted from
// https://github.com/Khan/perseus/blob/master/src/perseus-markdown.jsx
let index = startIndex;
let braceLevel = 0;
const delimLength = delimiter.length;
while (index < text.length) {
const character = text[index];
if (braceLevel <= 0 && text.slice(index, index + delimLength) === delimiter) {
return index;
} else if (character === "\\") {
index++;
} else if (character === "{") {
braceLevel++;
} else if (character === "}") {
braceLevel--;
}
index++;
}
return -1;
};
const escapeRegex = function (string) {
return string.replace(/[-/\\^$*+?.()|[\]{}]/g, "\\$&");
};
const amsRegex = /^\\begin{/;
const splitAtDelimiters = function (text, delimiters) {
let index;
const data = [];
const regexLeft = new RegExp("(" + delimiters.map(x => escapeRegex(x.left)).join("|") + ")");
while (true) {
index = text.search(regexLeft);
if (index === -1) {
break;
}
if (index > 0) {
data.push({
type: "text",
data: text.slice(0, index)
});
text = text.slice(index); // now text starts with delimiter
} // ... so this always succeeds:
const i = delimiters.findIndex(delim => text.startsWith(delim.left));
index = findEndOfMath(delimiters[i].right, text, delimiters[i].left.length);
if (index === -1) {
break;
}
const rawData = text.slice(0, index + delimiters[i].right.length);
const math = amsRegex.test(rawData) ? rawData : text.slice(delimiters[i].left.length, index);
data.push({
type: "math",
data: math,
rawData,
display: delimiters[i].display
});
text = text.slice(index + delimiters[i].right.length);
}
if (text !== "") {
data.push({
type: "text",
data: text
});
}
return data;
};
/* harmony default export */ var auto_render_splitAtDelimiters = (splitAtDelimiters);
;// CONCATENATED MODULE: ./contrib/auto-render/auto-render.js
/* eslint no-console:0 */
/* Note: optionsCopy is mutated by this method. If it is ever exposed in the
* API, we should copy it before mutating.
*/
const renderMathInText = function (text, optionsCopy) {
const data = auto_render_splitAtDelimiters(text, optionsCopy.delimiters);
if (data.length === 1 && data[0].type === 'text') {
// There is no formula in the text.
// Let's return null which means there is no need to replace
// the current text node with a new one.
return null;
}
const fragment = document.createDocumentFragment();
for (let i = 0; i < data.length; i++) {
if (data[i].type === "text") {
fragment.appendChild(document.createTextNode(data[i].data));
} else {
const span = document.createElement("span");
let math = data[i].data; // Override any display mode defined in the settings with that
// defined by the text itself
optionsCopy.displayMode = data[i].display;
try {
if (optionsCopy.preProcess) {
math = optionsCopy.preProcess(math);
}
external_katex_default().render(math, span, optionsCopy);
} catch (e) {
if (!(e instanceof (external_katex_default()).ParseError)) {
throw e;
}
optionsCopy.errorCallback("KaTeX auto-render: Failed to parse `" + data[i].data + "` with ", e);
fragment.appendChild(document.createTextNode(data[i].rawData));
continue;
}
fragment.appendChild(span);
}
}
return fragment;
};
const renderElem = function (elem, optionsCopy) {
for (let i = 0; i < elem.childNodes.length; i++) {
const childNode = elem.childNodes[i];
if (childNode.nodeType === 3) {
// Text node
// Concatenate all sibling text nodes.
// Webkit browsers split very large text nodes into smaller ones,
// so the delimiters may be split across different nodes.
let textContentConcat = childNode.textContent;
let sibling = childNode.nextSibling;
let nSiblings = 0;
while (sibling && sibling.nodeType === Node.TEXT_NODE) {
textContentConcat += sibling.textContent;
sibling = sibling.nextSibling;
nSiblings++;
}
const frag = renderMathInText(textContentConcat, optionsCopy);
if (frag) {
// Remove extra text nodes
for (let j = 0; j < nSiblings; j++) {
childNode.nextSibling.remove();
}
i += frag.childNodes.length - 1;
elem.replaceChild(frag, childNode);
} else {
// If the concatenated text does not contain math
// the siblings will not either
i += nSiblings;
}
} else if (childNode.nodeType === 1) {
// Element node
const className = ' ' + childNode.className + ' ';
const shouldRender = optionsCopy.ignoredTags.indexOf(childNode.nodeName.toLowerCase()) === -1 && optionsCopy.ignoredClasses.every(x => className.indexOf(' ' + x + ' ') === -1);
if (shouldRender) {
renderElem(childNode, optionsCopy);
}
} // Otherwise, it's something else, and ignore it.
}
};
const renderMathInElement = function (elem, options) {
if (!elem) {
throw new Error("No element provided to render");
}
const optionsCopy = {}; // Object.assign(optionsCopy, option)
for (const option in options) {
if (options.hasOwnProperty(option)) {
optionsCopy[option] = options[option];
}
} // default options
optionsCopy.delimiters = optionsCopy.delimiters || [{
left: "$$",
right: "$$",
display: true
}, {
left: "\\(",
right: "\\)",
display: false
}, // LaTeX uses $…$, but it ruins the display of normal `$` in text:
// {left: "$", right: "$", display: false},
// $ must come after $$
// Render AMS environments even if outside $$…$$ delimiters.
{
left: "\\begin{equation}",
right: "\\end{equation}",
display: true
}, {
left: "\\begin{align}",
right: "\\end{align}",
display: true
}, {
left: "\\begin{alignat}",
right: "\\end{alignat}",
display: true
}, {
left: "\\begin{gather}",
right: "\\end{gather}",
display: true
}, {
left: "\\begin{CD}",
right: "\\end{CD}",
display: true
}, {
left: "\\[",
right: "\\]",
display: true
}];
optionsCopy.ignoredTags = optionsCopy.ignoredTags || ["script", "noscript", "style", "textarea", "pre", "code", "option"];
optionsCopy.ignoredClasses = optionsCopy.ignoredClasses || [];
optionsCopy.errorCallback = optionsCopy.errorCallback || console.error; // Enable sharing of global macros defined via `\gdef` between different
// math elements within a single call to `renderMathInElement`.
optionsCopy.macros = optionsCopy.macros || {};
renderElem(elem, optionsCopy);
};
/* harmony default export */ var auto_render = (renderMathInElement);
__webpack_exports__ = __webpack_exports__["default"];
/******/ return __webpack_exports__;
/******/ })()
;
});
```
## /assets/katex/contrib/auto-render.min.js
```js path="/assets/katex/contrib/auto-render.min.js"
!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t(require("katex")):"function"==typeof define&&define.amd?define(["katex"],t):"object"==typeof exports?exports.renderMathInElement=t(require("katex")):e.renderMathInElement=t(e.katex)}("undefined"!=typeof self?self:this,(function(e){return function(){"use strict";var t={757:function(t){t.exports=e}},n={};function r(e){var o=n[e];if(void 0!==o)return o.exports;var i=n[e]={exports:{}};return t[e](i,i.exports,r),i.exports}r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,{a:t}),t},r.d=function(e,t){for(var n in t)r.o(t,n)&&!r.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:t[n]})},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)};var o={};r.d(o,{default:function(){return p}});var i=r(757),a=r.n(i);const l=function(e,t,n){let r=n,o=0;const i=e.length;for(;re.left.replace(/[-/\\^$*+?.()|[\]{}]/g,"\\$&"))).join("|")+")");for(;n=e.search(o),-1!==n;){n>0&&(r.push({type:"text",data:e.slice(0,n)}),e=e.slice(n));const o=t.findIndex((t=>e.startsWith(t.left)));if(n=l(t[o].right,e,t[o].left.length),-1===n)break;const i=e.slice(0,n+t[o].right.length),a=s.test(i)?i:e.slice(t[o].left.length,n);r.push({type:"math",data:a,rawData:i,display:t[o].display}),e=e.slice(n+t[o].right.length)}return""!==e&&r.push({type:"text",data:e}),r};const c=function(e,t){const n=d(e,t.delimiters);if(1===n.length&&"text"===n[0].type)return null;const r=document.createDocumentFragment();for(let e=0;e-1===e.indexOf(" "+t+" ")))&&f(r,t)}}};var p=function(e,t){if(!e)throw new Error("No element provided to render");const n={};for(const e in t)t.hasOwnProperty(e)&&(n[e]=t[e]);n.delimiters=n.delimiters||[{left:"$$",right:"$$",display:!0},{left:"\\(",right:"\\)",display:!1},{left:"\\begin{equation}",right:"\\end{equation}",display:!0},{left:"\\begin{align}",right:"\\end{align}",display:!0},{left:"\\begin{alignat}",right:"\\end{alignat}",display:!0},{left:"\\begin{gather}",right:"\\end{gather}",display:!0},{left:"\\begin{CD}",right:"\\end{CD}",display:!0},{left:"\\[",right:"\\]",display:!0}],n.ignoredTags=n.ignoredTags||["script","noscript","style","textarea","pre","code","option"],n.ignoredClasses=n.ignoredClasses||[],n.errorCallback=n.errorCallback||console.error,n.macros=n.macros||{},f(e,n)};return o=o.default}()}));
```
## /assets/katex/contrib/auto-render.mjs
```mjs path="/assets/katex/contrib/auto-render.mjs"
import katex from '../katex.mjs';
/* eslint no-constant-condition:0 */
var findEndOfMath = function findEndOfMath(delimiter, text, startIndex) {
// Adapted from
// https://github.com/Khan/perseus/blob/master/src/perseus-markdown.jsx
var index = startIndex;
var braceLevel = 0;
var delimLength = delimiter.length;
while (index < text.length) {
var character = text[index];
if (braceLevel <= 0 && text.slice(index, index + delimLength) === delimiter) {
return index;
} else if (character === "\\") {
index++;
} else if (character === "{") {
braceLevel++;
} else if (character === "}") {
braceLevel--;
}
index++;
}
return -1;
};
var escapeRegex = function escapeRegex(string) {
return string.replace(/[-/\\^$*+?.()|[\]{}]/g, "\\$&");
};
var amsRegex = /^\\begin{/;
var splitAtDelimiters = function splitAtDelimiters(text, delimiters) {
var index;
var data = [];
var regexLeft = new RegExp("(" + delimiters.map(x => escapeRegex(x.left)).join("|") + ")");
while (true) {
index = text.search(regexLeft);
if (index === -1) {
break;
}
if (index > 0) {
data.push({
type: "text",
data: text.slice(0, index)
});
text = text.slice(index); // now text starts with delimiter
} // ... so this always succeeds:
var i = delimiters.findIndex(delim => text.startsWith(delim.left));
index = findEndOfMath(delimiters[i].right, text, delimiters[i].left.length);
if (index === -1) {
break;
}
var rawData = text.slice(0, index + delimiters[i].right.length);
var math = amsRegex.test(rawData) ? rawData : text.slice(delimiters[i].left.length, index);
data.push({
type: "math",
data: math,
rawData,
display: delimiters[i].display
});
text = text.slice(index + delimiters[i].right.length);
}
if (text !== "") {
data.push({
type: "text",
data: text
});
}
return data;
};
/* eslint no-console:0 */
/* Note: optionsCopy is mutated by this method. If it is ever exposed in the
* API, we should copy it before mutating.
*/
var renderMathInText = function renderMathInText(text, optionsCopy) {
var data = splitAtDelimiters(text, optionsCopy.delimiters);
if (data.length === 1 && data[0].type === 'text') {
// There is no formula in the text.
// Let's return null which means there is no need to replace
// the current text node with a new one.
return null;
}
var fragment = document.createDocumentFragment();
for (var i = 0; i < data.length; i++) {
if (data[i].type === "text") {
fragment.appendChild(document.createTextNode(data[i].data));
} else {
var span = document.createElement("span");
var math = data[i].data; // Override any display mode defined in the settings with that
// defined by the text itself
optionsCopy.displayMode = data[i].display;
try {
if (optionsCopy.preProcess) {
math = optionsCopy.preProcess(math);
}
katex.render(math, span, optionsCopy);
} catch (e) {
if (!(e instanceof katex.ParseError)) {
throw e;
}
optionsCopy.errorCallback("KaTeX auto-render: Failed to parse `" + data[i].data + "` with ", e);
fragment.appendChild(document.createTextNode(data[i].rawData));
continue;
}
fragment.appendChild(span);
}
}
return fragment;
};
var renderElem = function renderElem(elem, optionsCopy) {
for (var i = 0; i < elem.childNodes.length; i++) {
var childNode = elem.childNodes[i];
if (childNode.nodeType === 3) {
// Text node
// Concatenate all sibling text nodes.
// Webkit browsers split very large text nodes into smaller ones,
// so the delimiters may be split across different nodes.
var textContentConcat = childNode.textContent;
var sibling = childNode.nextSibling;
var nSiblings = 0;
while (sibling && sibling.nodeType === Node.TEXT_NODE) {
textContentConcat += sibling.textContent;
sibling = sibling.nextSibling;
nSiblings++;
}
var frag = renderMathInText(textContentConcat, optionsCopy);
if (frag) {
// Remove extra text nodes
for (var j = 0; j < nSiblings; j++) {
childNode.nextSibling.remove();
}
i += frag.childNodes.length - 1;
elem.replaceChild(frag, childNode);
} else {
// If the concatenated text does not contain math
// the siblings will not either
i += nSiblings;
}
} else if (childNode.nodeType === 1) {
(function () {
// Element node
var className = ' ' + childNode.className + ' ';
var shouldRender = optionsCopy.ignoredTags.indexOf(childNode.nodeName.toLowerCase()) === -1 && optionsCopy.ignoredClasses.every(x => className.indexOf(' ' + x + ' ') === -1);
if (shouldRender) {
renderElem(childNode, optionsCopy);
}
})();
} // Otherwise, it's something else, and ignore it.
}
};
var renderMathInElement = function renderMathInElement(elem, options) {
if (!elem) {
throw new Error("No element provided to render");
}
var optionsCopy = {}; // Object.assign(optionsCopy, option)
for (var option in options) {
if (options.hasOwnProperty(option)) {
optionsCopy[option] = options[option];
}
} // default options
optionsCopy.delimiters = optionsCopy.delimiters || [{
left: "$$",
right: "$$",
display: true
}, {
left: "\\(",
right: "\\)",
display: false
}, // LaTeX uses $…$, but it ruins the display of normal `$` in text:
// {left: "$", right: "$", display: false},
// $ must come after $$
// Render AMS environments even if outside $$…$$ delimiters.
{
left: "\\begin{equation}",
right: "\\end{equation}",
display: true
}, {
left: "\\begin{align}",
right: "\\end{align}",
display: true
}, {
left: "\\begin{alignat}",
right: "\\end{alignat}",
display: true
}, {
left: "\\begin{gather}",
right: "\\end{gather}",
display: true
}, {
left: "\\begin{CD}",
right: "\\end{CD}",
display: true
}, {
left: "\\[",
right: "\\]",
display: true
}];
optionsCopy.ignoredTags = optionsCopy.ignoredTags || ["script", "noscript", "style", "textarea", "pre", "code", "option"];
optionsCopy.ignoredClasses = optionsCopy.ignoredClasses || [];
optionsCopy.errorCallback = optionsCopy.errorCallback || console.error; // Enable sharing of global macros defined via `\gdef` between different
// math elements within a single call to `renderMathInElement`.
optionsCopy.macros = optionsCopy.macros || {};
renderElem(elem, optionsCopy);
};
export { renderMathInElement as default };
```
## /assets/katex/contrib/copy-tex.js
```js path="/assets/katex/contrib/copy-tex.js"
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else {
var a = factory();
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})((typeof self !== 'undefined' ? self : this), function() {
return /******/ (function() { // webpackBootstrap
/******/ "use strict";
var __webpack_exports__ = {};
;// CONCATENATED MODULE: ./contrib/copy-tex/katex2tex.js
// Set these to how you want inline and display math to be delimited.
const defaultCopyDelimiters = {
inline: ['$', '$'],
// alternative: ['\(', '\)']
display: ['$$', '$$'] // alternative: ['\[', '\]']
}; // Replace .katex elements with their TeX source ( element).
// Modifies fragment in-place. Useful for writing your own 'copy' handler,
// as in copy-tex.js.
function katexReplaceWithTex(fragment, copyDelimiters) {
if (copyDelimiters === void 0) {
copyDelimiters = defaultCopyDelimiters;
}
// Remove .katex-html blocks that are preceded by .katex-mathml blocks
// (which will get replaced below).
const katexHtml = fragment.querySelectorAll('.katex-mathml + .katex-html');
for (let i = 0; i < katexHtml.length; i++) {
const element = katexHtml[i];
if (element.remove) {
element.remove();
} else if (element.parentNode) {
element.parentNode.removeChild(element);
}
} // Replace .katex-mathml elements with their annotation (TeX source)
// descendant, with inline delimiters.
const katexMathml = fragment.querySelectorAll('.katex-mathml');
for (let i = 0; i < katexMathml.length; i++) {
const element = katexMathml[i];
const texSource = element.querySelector('annotation');
if (texSource) {
if (element.replaceWith) {
element.replaceWith(texSource);
} else if (element.parentNode) {
element.parentNode.replaceChild(texSource, element);
}
texSource.innerHTML = copyDelimiters.inline[0] + texSource.innerHTML + copyDelimiters.inline[1];
}
} // Switch display math to display delimiters.
const displays = fragment.querySelectorAll('.katex-display annotation');
for (let i = 0; i < displays.length; i++) {
const element = displays[i];
element.innerHTML = copyDelimiters.display[0] + element.innerHTML.substr(copyDelimiters.inline[0].length, element.innerHTML.length - copyDelimiters.inline[0].length - copyDelimiters.inline[1].length) + copyDelimiters.display[1];
}
return fragment;
}
/* harmony default export */ var katex2tex = (katexReplaceWithTex);
;// CONCATENATED MODULE: ./contrib/copy-tex/copy-tex.js
// Return
element containing node, or null if not found.
function closestKatex(node) {
// If node is a Text Node, for example, go up to containing Element,
// where we can apply the `closest` method.
const element = node instanceof Element ? node : node.parentElement;
return element && element.closest('.katex');
} // Global copy handler to modify behavior on/within .katex elements.
document.addEventListener('copy', function (event) {
const selection = window.getSelection();
if (selection.isCollapsed || !event.clipboardData) {
return; // default action OK if selection is empty or unchangeable
}
const clipboardData = event.clipboardData;
const range = selection.getRangeAt(0); // When start point is within a formula, expand to entire formula.
const startKatex = closestKatex(range.startContainer);
if (startKatex) {
range.setStartBefore(startKatex);
} // Similarly, when end point is within a formula, expand to entire formula.
const endKatex = closestKatex(range.endContainer);
if (endKatex) {
range.setEndAfter(endKatex);
}
const fragment = range.cloneContents();
if (!fragment.querySelector('.katex-mathml')) {
return; // default action OK if no .katex-mathml elements
}
const htmlContents = Array.prototype.map.call(fragment.childNodes, el => el instanceof Text ? el.textContent : el.outerHTML).join(''); // Preserve usual HTML copy/paste behavior.
clipboardData.setData('text/html', htmlContents); // Rewrite plain-text version.
clipboardData.setData('text/plain', katex2tex(fragment).textContent); // Prevent normal copy handling.
event.preventDefault();
});
__webpack_exports__ = __webpack_exports__["default"];
/******/ return __webpack_exports__;
/******/ })()
;
});
```
## /assets/katex/contrib/copy-tex.min.js
```js path="/assets/katex/contrib/copy-tex.min.js"
!function(e,t){if("object"==typeof exports&&"object"==typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{var n=t();for(var o in n)("object"==typeof exports?exports:e)[o]=n[o]}}("undefined"!=typeof self?self:this,(function(){return function(){"use strict";var e={};const t={inline:["$","$"],display:["$$","$$"]};var n=function(e,n){void 0===n&&(n=t);const o=e.querySelectorAll(".katex-mathml + .katex-html");for(let e=0;ee instanceof Text?e.textContent:e.outerHTML)).join("");r.setData("text/html",c),r.setData("text/plain",n(s).textContent),e.preventDefault()})),e=e.default}()}));
```
## /assets/katex/contrib/copy-tex.mjs
```mjs path="/assets/katex/contrib/copy-tex.mjs"
// Set these to how you want inline and display math to be delimited.
var defaultCopyDelimiters = {
inline: ['$', '$'],
// alternative: ['\(', '\)']
display: ['$$', '$$'] // alternative: ['\[', '\]']
}; // Replace .katex elements with their TeX source ( element).
// Modifies fragment in-place. Useful for writing your own 'copy' handler,
// as in copy-tex.js.
function katexReplaceWithTex(fragment, copyDelimiters) {
if (copyDelimiters === void 0) {
copyDelimiters = defaultCopyDelimiters;
}
// Remove .katex-html blocks that are preceded by .katex-mathml blocks
// (which will get replaced below).
var katexHtml = fragment.querySelectorAll('.katex-mathml + .katex-html');
for (var i = 0; i < katexHtml.length; i++) {
var element = katexHtml[i];
if (element.remove) {
element.remove();
} else if (element.parentNode) {
element.parentNode.removeChild(element);
}
} // Replace .katex-mathml elements with their annotation (TeX source)
// descendant, with inline delimiters.
var katexMathml = fragment.querySelectorAll('.katex-mathml');
for (var _i = 0; _i < katexMathml.length; _i++) {
var _element = katexMathml[_i];
var texSource = _element.querySelector('annotation');
if (texSource) {
if (_element.replaceWith) {
_element.replaceWith(texSource);
} else if (_element.parentNode) {
_element.parentNode.replaceChild(texSource, _element);
}
texSource.innerHTML = copyDelimiters.inline[0] + texSource.innerHTML + copyDelimiters.inline[1];
}
} // Switch display math to display delimiters.
var displays = fragment.querySelectorAll('.katex-display annotation');
for (var _i2 = 0; _i2 < displays.length; _i2++) {
var _element2 = displays[_i2];
_element2.innerHTML = copyDelimiters.display[0] + _element2.innerHTML.substr(copyDelimiters.inline[0].length, _element2.innerHTML.length - copyDelimiters.inline[0].length - copyDelimiters.inline[1].length) + copyDelimiters.display[1];
}
return fragment;
}
function closestKatex(node) {
// If node is a Text Node, for example, go up to containing Element,
// where we can apply the `closest` method.
var element = node instanceof Element ? node : node.parentElement;
return element && element.closest('.katex');
} // Global copy handler to modify behavior on/within .katex elements.
document.addEventListener('copy', function (event) {
var selection = window.getSelection();
if (selection.isCollapsed || !event.clipboardData) {
return; // default action OK if selection is empty or unchangeable
}
var clipboardData = event.clipboardData;
var range = selection.getRangeAt(0); // When start point is within a formula, expand to entire formula.
var startKatex = closestKatex(range.startContainer);
if (startKatex) {
range.setStartBefore(startKatex);
} // Similarly, when end point is within a formula, expand to entire formula.
var endKatex = closestKatex(range.endContainer);
if (endKatex) {
range.setEndAfter(endKatex);
}
var fragment = range.cloneContents();
if (!fragment.querySelector('.katex-mathml')) {
return; // default action OK if no .katex-mathml elements
}
var htmlContents = Array.prototype.map.call(fragment.childNodes, el => el instanceof Text ? el.textContent : el.outerHTML).join(''); // Preserve usual HTML copy/paste behavior.
clipboardData.setData('text/html', htmlContents); // Rewrite plain-text version.
clipboardData.setData('text/plain', katexReplaceWithTex(fragment).textContent); // Prevent normal copy handling.
event.preventDefault();
});
```
## /assets/katex/contrib/mathtex-script-type.js
```js path="/assets/katex/contrib/mathtex-script-type.js"
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory(require("katex"));
else if(typeof define === 'function' && define.amd)
define(["katex"], factory);
else {
var a = typeof exports === 'object' ? factory(require("katex")) : factory(root["katex"]);
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})((typeof self !== 'undefined' ? self : this), function(__WEBPACK_EXTERNAL_MODULE__757__) {
return /******/ (function() { // webpackBootstrap
/******/ "use strict";
/******/ var __webpack_modules__ = ({
/***/ 757:
/***/ (function(module) {
module.exports = __WEBPACK_EXTERNAL_MODULE__757__;
/***/ })
/******/ });
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/ // Check if module is in cache
/******/ var cachedModule = __webpack_module_cache__[moduleId];
/******/ if (cachedModule !== undefined) {
/******/ return cachedModule.exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = __webpack_module_cache__[moduleId] = {
/******/ // no module.id needed
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__);
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/************************************************************************/
/******/ /* webpack/runtime/compat get default export */
/******/ !function() {
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function() { return module['default']; } :
/******/ function() { return module; };
/******/ __webpack_require__.d(getter, { a: getter });
/******/ return getter;
/******/ };
/******/ }();
/******/
/******/ /* webpack/runtime/define property getters */
/******/ !function() {
/******/ // define getter functions for harmony exports
/******/ __webpack_require__.d = function(exports, definition) {
/******/ for(var key in definition) {
/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {
/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });
/******/ }
/******/ }
/******/ };
/******/ }();
/******/
/******/ /* webpack/runtime/hasOwnProperty shorthand */
/******/ !function() {
/******/ __webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }
/******/ }();
/******/
/************************************************************************/
var __webpack_exports__ = {};
/* harmony import */ var katex__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(757);
/* harmony import */ var katex__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(katex__WEBPACK_IMPORTED_MODULE_0__);
let scripts = document.body.getElementsByTagName("script");
scripts = Array.prototype.slice.call(scripts);
scripts.forEach(function (script) {
if (!script.type || !script.type.match(/math\/tex/i)) {
return -1;
}
const display = script.type.match(/mode\s*=\s*display(;|\s|\n|$)/) != null;
const katexElement = document.createElement(display ? "div" : "span");
katexElement.setAttribute("class", display ? "equation" : "inline-equation");
try {
katex__WEBPACK_IMPORTED_MODULE_0___default().render(script.text, katexElement, {
displayMode: display
});
} catch (err) {
//console.error(err); linter doesn't like this
katexElement.textContent = script.text;
}
script.parentNode.replaceChild(katexElement, script);
});
__webpack_exports__ = __webpack_exports__["default"];
/******/ return __webpack_exports__;
/******/ })()
;
});
```
## /assets/katex/contrib/mathtex-script-type.min.js
```js path="/assets/katex/contrib/mathtex-script-type.min.js"
!function(e,t){if("object"==typeof exports&&"object"==typeof module)module.exports=t(require("katex"));else if("function"==typeof define&&define.amd)define(["katex"],t);else{var n="object"==typeof exports?t(require("katex")):t(e.katex);for(var r in n)("object"==typeof exports?exports:e)[r]=n[r]}}("undefined"!=typeof self?self:this,(function(e){return function(){"use strict";var t={757:function(t){t.exports=e}},n={};function r(e){var o=n[e];if(void 0!==o)return o.exports;var i=n[e]={exports:{}};return t[e](i,i.exports,r),i.exports}r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,{a:t}),t},r.d=function(e,t){for(var n in t)r.o(t,n)&&!r.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:t[n]})},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)};var o={},i=r(757),a=r.n(i);let u=document.body.getElementsByTagName("script");return u=Array.prototype.slice.call(u),u.forEach((function(e){if(!e.type||!e.type.match(/math\/tex/i))return-1;const t=null!=e.type.match(/mode\s*=\s*display(;|\s|\n|$)/),n=document.createElement(t?"div":"span");n.setAttribute("class",t?"equation":"inline-equation");try{a().render(e.text,n,{displayMode:t})}catch(t){n.textContent=e.text}e.parentNode.replaceChild(n,e)})),o=o.default}()}));
```
## /assets/katex/contrib/mathtex-script-type.mjs
```mjs path="/assets/katex/contrib/mathtex-script-type.mjs"
import katex from '../katex.mjs';
var scripts = document.body.getElementsByTagName("script");
scripts = Array.prototype.slice.call(scripts);
scripts.forEach(function (script) {
if (!script.type || !script.type.match(/math\/tex/i)) {
return -1;
}
var display = script.type.match(/mode\s*=\s*display(;|\s|\n|$)/) != null;
var katexElement = document.createElement(display ? "div" : "span");
katexElement.setAttribute("class", display ? "equation" : "inline-equation");
try {
katex.render(script.text, katexElement, {
displayMode: display
});
} catch (err) {
//console.error(err); linter doesn't like this
katexElement.textContent = script.text;
}
script.parentNode.replaceChild(katexElement, script);
});
```
## /assets/katex/contrib/mhchem.js
```js path="/assets/katex/contrib/mhchem.js"
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory(require("katex"));
else if(typeof define === 'function' && define.amd)
define(["katex"], factory);
else {
var a = typeof exports === 'object' ? factory(require("katex")) : factory(root["katex"]);
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})((typeof self !== 'undefined' ? self : this), function(__WEBPACK_EXTERNAL_MODULE__757__) {
return /******/ (function() { // webpackBootstrap
/******/ "use strict";
/******/ var __webpack_modules__ = ({
/***/ 757:
/***/ (function(module) {
module.exports = __WEBPACK_EXTERNAL_MODULE__757__;
/***/ })
/******/ });
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/ // Check if module is in cache
/******/ var cachedModule = __webpack_module_cache__[moduleId];
/******/ if (cachedModule !== undefined) {
/******/ return cachedModule.exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = __webpack_module_cache__[moduleId] = {
/******/ // no module.id needed
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__);
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/************************************************************************/
/******/ /* webpack/runtime/compat get default export */
/******/ !function() {
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function() { return module['default']; } :
/******/ function() { return module; };
/******/ __webpack_require__.d(getter, { a: getter });
/******/ return getter;
/******/ };
/******/ }();
/******/
/******/ /* webpack/runtime/define property getters */
/******/ !function() {
/******/ // define getter functions for harmony exports
/******/ __webpack_require__.d = function(exports, definition) {
/******/ for(var key in definition) {
/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {
/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });
/******/ }
/******/ }
/******/ };
/******/ }();
/******/
/******/ /* webpack/runtime/hasOwnProperty shorthand */
/******/ !function() {
/******/ __webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }
/******/ }();
/******/
/************************************************************************/
var __webpack_exports__ = {};
/* harmony import */ var katex__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(757);
/* harmony import */ var katex__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(katex__WEBPACK_IMPORTED_MODULE_0__);
/* eslint-disable */
/* -*- Mode: JavaScript; indent-tabs-mode:nil; js-indent-level: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/*************************************************************
*
* KaTeX mhchem.js
*
* This file implements a KaTeX version of mhchem version 3.3.0.
* It is adapted from MathJax/extensions/TeX/mhchem.js
* It differs from the MathJax version as follows:
* 1. The interface is changed so that it can be called from KaTeX, not MathJax.
* 2. \rlap and \llap are replaced with \mathrlap and \mathllap.
* 3. Four lines of code are edited in order to use \raisebox instead of \raise.
* 4. The reaction arrow code is simplified. All reaction arrows are rendered
* using KaTeX extensible arrows instead of building non-extensible arrows.
* 5. \tripledash vertical alignment is slightly adjusted.
*
* This code, as other KaTeX code, is released under the MIT license.
*
* /*************************************************************
*
* MathJax/extensions/TeX/mhchem.js
*
* Implements the \ce command for handling chemical formulas
* from the mhchem LaTeX package.
*
* ---------------------------------------------------------------------
*
* Copyright (c) 2011-2015 The MathJax Consortium
* Copyright (c) 2015-2018 Martin Hensel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Coding Style
// - use '' for identifiers that can by minified/uglified
// - use "" for strings that need to stay untouched
// version: "3.3.0" for MathJax and KaTeX
// Add \ce, \pu, and \tripledash to the KaTeX macros.
katex__WEBPACK_IMPORTED_MODULE_0___default().__defineMacro("\\ce", function (context) {
return chemParse(context.consumeArgs(1)[0], "ce");
});
katex__WEBPACK_IMPORTED_MODULE_0___default().__defineMacro("\\pu", function (context) {
return chemParse(context.consumeArgs(1)[0], "pu");
}); // Needed for \bond for the ~ forms
// Raise by 2.56mu, not 2mu. We're raising a hyphen-minus, U+002D, not
// a mathematical minus, U+2212. So we need that extra 0.56.
katex__WEBPACK_IMPORTED_MODULE_0___default().__defineMacro("\\tripledash", "{\\vphantom{-}\\raisebox{2.56mu}{$\\mkern2mu" + "\\tiny\\text{-}\\mkern1mu\\text{-}\\mkern1mu\\text{-}\\mkern2mu$}}");
//
// This is the main function for handing the \ce and \pu commands.
// It takes the argument to \ce or \pu and returns the corresponding TeX string.
//
var chemParse = function (tokens, stateMachine) {
// Recreate the argument string from KaTeX's array of tokens.
var str = "";
var expectedLoc = tokens.length && tokens[tokens.length - 1].loc.start;
for (var i = tokens.length - 1; i >= 0; i--) {
if (tokens[i].loc.start > expectedLoc) {
// context.consumeArgs has eaten a space.
str += " ";
expectedLoc = tokens[i].loc.start;
}
str += tokens[i].text;
expectedLoc += tokens[i].text.length;
}
var tex = texify.go(mhchemParser.go(str, stateMachine));
return tex;
}; //
// Core parser for mhchem syntax (recursive)
//
/** @type {MhchemParser} */
var mhchemParser = {
//
// Parses mchem \ce syntax
//
// Call like
// go("H2O");
//
go: function (input, stateMachine) {
if (!input) {
return [];
}
if (stateMachine === undefined) {
stateMachine = 'ce';
}
var state = '0'; //
// String buffers for parsing:
//
// buffer.a == amount
// buffer.o == element
// buffer.b == left-side superscript
// buffer.p == left-side subscript
// buffer.q == right-side subscript
// buffer.d == right-side superscript
//
// buffer.r == arrow
// buffer.rdt == arrow, script above, type
// buffer.rd == arrow, script above, content
// buffer.rqt == arrow, script below, type
// buffer.rq == arrow, script below, content
//
// buffer.text_
// buffer.rm
// etc.
//
// buffer.parenthesisLevel == int, starting at 0
// buffer.sb == bool, space before
// buffer.beginsWithBond == bool
//
// These letters are also used as state names.
//
// Other states:
// 0 == begin of main part (arrow/operator unlikely)
// 1 == next entity
// 2 == next entity (arrow/operator unlikely)
// 3 == next atom
// c == macro
//
/** @type {Buffer} */
var buffer = {};
buffer['parenthesisLevel'] = 0;
input = input.replace(/\n/g, " ");
input = input.replace(/[\u2212\u2013\u2014\u2010]/g, "-");
input = input.replace(/[\u2026]/g, "..."); //
// Looks through mhchemParser.transitions, to execute a matching action
// (recursive)
//
var lastInput;
var watchdog = 10;
/** @type {ParserOutput[]} */
var output = [];
while (true) {
if (lastInput !== input) {
watchdog = 10;
lastInput = input;
} else {
watchdog--;
} //
// Find actions in transition table
//
var machine = mhchemParser.stateMachines[stateMachine];
var t = machine.transitions[state] || machine.transitions['*'];
iterateTransitions: for (var i = 0; i < t.length; i++) {
var matches = mhchemParser.patterns.match_(t[i].pattern, input);
if (matches) {
//
// Execute actions
//
var task = t[i].task;
for (var iA = 0; iA < task.action_.length; iA++) {
var o; //
// Find and execute action
//
if (machine.actions[task.action_[iA].type_]) {
o = machine.actions[task.action_[iA].type_](buffer, matches.match_, task.action_[iA].option);
} else if (mhchemParser.actions[task.action_[iA].type_]) {
o = mhchemParser.actions[task.action_[iA].type_](buffer, matches.match_, task.action_[iA].option);
} else {
throw ["MhchemBugA", "mhchem bug A. Please report. (" + task.action_[iA].type_ + ")"]; // Trying to use non-existing action
} //
// Add output
//
mhchemParser.concatArray(output, o);
} //
// Set next state,
// Shorten input,
// Continue with next character
// (= apply only one transition per position)
//
state = task.nextState || state;
if (input.length > 0) {
if (!task.revisit) {
input = matches.remainder;
}
if (!task.toContinue) {
break iterateTransitions;
}
} else {
return output;
}
}
} //
// Prevent infinite loop
//
if (watchdog <= 0) {
throw ["MhchemBugU", "mhchem bug U. Please report."]; // Unexpected character
}
}
},
concatArray: function (a, b) {
if (b) {
if (Array.isArray(b)) {
for (var iB = 0; iB < b.length; iB++) {
a.push(b[iB]);
}
} else {
a.push(b);
}
}
},
patterns: {
//
// Matching patterns
// either regexps or function that return null or {match_:"a", remainder:"bc"}
//
patterns: {
// property names must not look like integers ("2") for correct property traversal order, later on
'empty': /^$/,
'else': /^./,
'else2': /^./,
'space': /^\s/,
'space A': /^\s(?=[A-Z\\$])/,
'space$': /^\s$/,
'a-z': /^[a-z]/,
'x': /^x/,
'x$': /^x$/,
'i$': /^i$/,
'letters': /^(?:[a-zA-Z\u03B1-\u03C9\u0391-\u03A9?@]|(?:\\(?:alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega|Gamma|Delta|Theta|Lambda|Xi|Pi|Sigma|Upsilon|Phi|Psi|Omega)(?:\s+|\{\}|(?![a-zA-Z]))))+/,
'\\greek': /^\\(?:alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega|Gamma|Delta|Theta|Lambda|Xi|Pi|Sigma|Upsilon|Phi|Psi|Omega)(?:\s+|\{\}|(?![a-zA-Z]))/,
'one lowercase latin letter $': /^(?:([a-z])(?:$|[^a-zA-Z]))$/,
'$one lowercase latin letter$ $': /^\$(?:([a-z])(?:$|[^a-zA-Z]))\$$/,
'one lowercase greek letter $': /^(?:\$?[\u03B1-\u03C9]\$?|\$?\\(?:alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega)\s*\$?)(?:\s+|\{\}|(?![a-zA-Z]))$/,
'digits': /^[0-9]+/,
'-9.,9': /^[+\-]?(?:[0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+))/,
'-9.,9 no missing 0': /^[+\-]?[0-9]+(?:[.,][0-9]+)?/,
'(-)(9.,9)(e)(99)': function (input) {
var m = input.match(/^(\+\-|\+\/\-|\+|\-|\\pm\s?)?([0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+))?(\((?:[0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+))\))?(?:([eE]|\s*(\*|x|\\times|\u00D7)\s*10\^)([+\-]?[0-9]+|\{[+\-]?[0-9]+\}))?/);
if (m && m[0]) {
return {
match_: m.splice(1),
remainder: input.substr(m[0].length)
};
}
return null;
},
'(-)(9)^(-9)': function (input) {
var m = input.match(/^(\+\-|\+\/\-|\+|\-|\\pm\s?)?([0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+)?)\^([+\-]?[0-9]+|\{[+\-]?[0-9]+\})/);
if (m && m[0]) {
return {
match_: m.splice(1),
remainder: input.substr(m[0].length)
};
}
return null;
},
'state of aggregation $': function (input) {
// ... or crystal system
var a = mhchemParser.patterns.findObserveGroups(input, "", /^\([a-z]{1,3}(?=[\),])/, ")", ""); // (aq), (aq,$\infty$), (aq, sat)
if (a && a.remainder.match(/^($|[\s,;\)\]\}])/)) {
return a;
} // AND end of 'phrase'
var m = input.match(/^(?:\((?:\\ca\s?)?\$[amothc]\$\))/); // OR crystal system ($o$) (\ca$c$)
if (m) {
return {
match_: m[0],
remainder: input.substr(m[0].length)
};
}
return null;
},
'_{(state of aggregation)}$': /^_\{(\([a-z]{1,3}\))\}/,
'{[(': /^(?:\\\{|\[|\()/,
')]}': /^(?:\)|\]|\\\})/,
', ': /^[,;]\s*/,
',': /^[,;]/,
'.': /^[.]/,
'. ': /^([.\u22C5\u00B7\u2022])\s*/,
'...': /^\.\.\.(?=$|[^.])/,
'* ': /^([*])\s*/,
'^{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "^{", "", "", "}");
},
'^($...$)': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "^", "$", "$", "");
},
'^a': /^\^([0-9]+|[^\\_])/,
'^\\x{}{}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "^", /^\\[a-zA-Z]+\{/, "}", "", "", "{", "}", "", true);
},
'^\\x{}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "^", /^\\[a-zA-Z]+\{/, "}", "");
},
'^\\x': /^\^(\\[a-zA-Z]+)\s*/,
'^(-1)': /^\^(-?\d+)/,
'\'': /^'/,
'_{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "_{", "", "", "}");
},
'_($...$)': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "_", "$", "$", "");
},
'_9': /^_([+\-]?[0-9]+|[^\\])/,
'_\\x{}{}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "_", /^\\[a-zA-Z]+\{/, "}", "", "", "{", "}", "", true);
},
'_\\x{}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "_", /^\\[a-zA-Z]+\{/, "}", "");
},
'_\\x': /^_(\\[a-zA-Z]+)\s*/,
'^_': /^(?:\^(?=_)|\_(?=\^)|[\^_]$)/,
'{}': /^\{\}/,
'{...}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "", "{", "}", "");
},
'{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "{", "", "", "}");
},
'$...$': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "", "$", "$", "");
},
'${(...)}$': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "${", "", "", "}$");
},
'$(...)$': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "$", "", "", "$");
},
'=<>': /^[=<>]/,
'#': /^[#\u2261]/,
'+': /^\+/,
'-$': /^-(?=[\s_},;\]/]|$|\([a-z]+\))/,
// -space -, -; -] -/ -$ -state-of-aggregation
'-9': /^-(?=[0-9])/,
'- orbital overlap': /^-(?=(?:[spd]|sp)(?:$|[\s,;\)\]\}]))/,
'-': /^-/,
'pm-operator': /^(?:\\pm|\$\\pm\$|\+-|\+\/-)/,
'operator': /^(?:\+|(?:[\-=<>]|<<|>>|\\approx|\$\\approx\$)(?=\s|$|-?[0-9]))/,
'arrowUpDown': /^(?:v|\(v\)|\^|\(\^\))(?=$|[\s,;\)\]\}])/,
'\\bond{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\bond{", "", "", "}");
},
'->': /^(?:<->|<-->|->|<-|<=>>|<<=>|<=>|[\u2192\u27F6\u21CC])/,
'CMT': /^[CMT](?=\[)/,
'[(...)]': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "[", "", "", "]");
},
'1st-level escape': /^(&|\\\\|\\hline)\s*/,
'\\,': /^(?:\\[,\ ;:])/,
// \\x - but output no space before
'\\x{}{}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "", /^\\[a-zA-Z]+\{/, "}", "", "", "{", "}", "", true);
},
'\\x{}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "", /^\\[a-zA-Z]+\{/, "}", "");
},
'\\ca': /^\\ca(?:\s+|(?![a-zA-Z]))/,
'\\x': /^(?:\\[a-zA-Z]+\s*|\\[_&{}%])/,
'orbital': /^(?:[0-9]{1,2}[spdfgh]|[0-9]{0,2}sp)(?=$|[^a-zA-Z])/,
// only those with numbers in front, because the others will be formatted correctly anyway
'others': /^[\/~|]/,
'\\frac{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\frac{", "", "", "}", "{", "", "", "}");
},
'\\overset{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\overset{", "", "", "}", "{", "", "", "}");
},
'\\underset{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\underset{", "", "", "}", "{", "", "", "}");
},
'\\underbrace{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\underbrace{", "", "", "}_", "{", "", "", "}");
},
'\\color{(...)}0': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\color{", "", "", "}");
},
'\\color{(...)}{(...)}1': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\color{", "", "", "}", "{", "", "", "}");
},
'\\color(...){(...)}2': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\color", "\\", "", /^(?=\{)/, "{", "", "", "}");
},
'\\ce{(...)}': function (input) {
return mhchemParser.patterns.findObserveGroups(input, "\\ce{", "", "", "}");
},
'oxidation$': /^(?:[+-][IVX]+|\\pm\s*0|\$\\pm\$\s*0)$/,
'd-oxidation$': /^(?:[+-]?\s?[IVX]+|\\pm\s*0|\$\\pm\$\s*0)$/,
// 0 could be oxidation or charge
'roman numeral': /^[IVX]+/,
'1/2$': /^[+\-]?(?:[0-9]+|\$[a-z]\$|[a-z])\/[0-9]+(?:\$[a-z]\$|[a-z])?$/,
'amount': function (input) {
var match; // e.g. 2, 0.5, 1/2, -2, n/2, +; $a$ could be added later in parsing
match = input.match(/^(?:(?:(?:\([+\-]?[0-9]+\/[0-9]+\)|[+\-]?(?:[0-9]+|\$[a-z]\$|[a-z])\/[0-9]+|[+\-]?[0-9]+[.,][0-9]+|[+\-]?\.[0-9]+|[+\-]?[0-9]+)(?:[a-z](?=\s*[A-Z]))?)|[+\-]?[a-z](?=\s*[A-Z])|\+(?!\s))/);
if (match) {
return {
match_: match[0],
remainder: input.substr(match[0].length)
};
}
var a = mhchemParser.patterns.findObserveGroups(input, "", "$", "$", "");
if (a) {
// e.g. $2n-1$, $-$
match = a.match_.match(/^\$(?:\(?[+\-]?(?:[0-9]*[a-z]?[+\-])?[0-9]*[a-z](?:[+\-][0-9]*[a-z]?)?\)?|\+|-)\$$/);
if (match) {
return {
match_: match[0],
remainder: input.substr(match[0].length)
};
}
}
return null;
},
'amount2': function (input) {
return this['amount'](input);
},
'(KV letters),': /^(?:[A-Z][a-z]{0,2}|i)(?=,)/,
'formula$': function (input) {
if (input.match(/^\([a-z]+\)$/)) {
return null;
} // state of aggregation = no formula
var match = input.match(/^(?:[a-z]|(?:[0-9\ \+\-\,\.\(\)]+[a-z])+[0-9\ \+\-\,\.\(\)]*|(?:[a-z][0-9\ \+\-\,\.\(\)]+)+[a-z]?)$/);
if (match) {
return {
match_: match[0],
remainder: input.substr(match[0].length)
};
}
return null;
},
'uprightEntities': /^(?:pH|pOH|pC|pK|iPr|iBu)(?=$|[^a-zA-Z])/,
'/': /^\s*(\/)\s*/,
'//': /^\s*(\/\/)\s*/,
'*': /^\s*[*.]\s*/
},
findObserveGroups: function (input, begExcl, begIncl, endIncl, endExcl, beg2Excl, beg2Incl, end2Incl, end2Excl, combine) {
/** @type {{(input: string, pattern: string | RegExp): string | string[] | null;}} */
var _match = function (input, pattern) {
if (typeof pattern === "string") {
if (input.indexOf(pattern) !== 0) {
return null;
}
return pattern;
} else {
var match = input.match(pattern);
if (!match) {
return null;
}
return match[0];
}
};
/** @type {{(input: string, i: number, endChars: string | RegExp): {endMatchBegin: number, endMatchEnd: number} | null;}} */
var _findObserveGroups = function (input, i, endChars) {
var braces = 0;
while (i < input.length) {
var a = input.charAt(i);
var match = _match(input.substr(i), endChars);
if (match !== null && braces === 0) {
return {
endMatchBegin: i,
endMatchEnd: i + match.length
};
} else if (a === "{") {
braces++;
} else if (a === "}") {
if (braces === 0) {
throw ["ExtraCloseMissingOpen", "Extra close brace or missing open brace"];
} else {
braces--;
}
}
i++;
}
if (braces > 0) {
return null;
}
return null;
};
var match = _match(input, begExcl);
if (match === null) {
return null;
}
input = input.substr(match.length);
match = _match(input, begIncl);
if (match === null) {
return null;
}
var e = _findObserveGroups(input, match.length, endIncl || endExcl);
if (e === null) {
return null;
}
var match1 = input.substring(0, endIncl ? e.endMatchEnd : e.endMatchBegin);
if (!(beg2Excl || beg2Incl)) {
return {
match_: match1,
remainder: input.substr(e.endMatchEnd)
};
} else {
var group2 = this.findObserveGroups(input.substr(e.endMatchEnd), beg2Excl, beg2Incl, end2Incl, end2Excl);
if (group2 === null) {
return null;
}
/** @type {string[]} */
var matchRet = [match1, group2.match_];
return {
match_: combine ? matchRet.join("") : matchRet,
remainder: group2.remainder
};
}
},
//
// Matching function
// e.g. match("a", input) will look for the regexp called "a" and see if it matches
// returns null or {match_:"a", remainder:"bc"}
//
match_: function (m, input) {
var pattern = mhchemParser.patterns.patterns[m];
if (pattern === undefined) {
throw ["MhchemBugP", "mhchem bug P. Please report. (" + m + ")"]; // Trying to use non-existing pattern
} else if (typeof pattern === "function") {
return mhchemParser.patterns.patterns[m](input); // cannot use cached var pattern here, because some pattern functions need this===mhchemParser
} else {
// RegExp
var match = input.match(pattern);
if (match) {
var mm;
if (match[2]) {
mm = [match[1], match[2]];
} else if (match[1]) {
mm = match[1];
} else {
mm = match[0];
}
return {
match_: mm,
remainder: input.substr(match[0].length)
};
}
return null;
}
}
},
//
// Generic state machine actions
//
actions: {
'a=': function (buffer, m) {
buffer.a = (buffer.a || "") + m;
},
'b=': function (buffer, m) {
buffer.b = (buffer.b || "") + m;
},
'p=': function (buffer, m) {
buffer.p = (buffer.p || "") + m;
},
'o=': function (buffer, m) {
buffer.o = (buffer.o || "") + m;
},
'q=': function (buffer, m) {
buffer.q = (buffer.q || "") + m;
},
'd=': function (buffer, m) {
buffer.d = (buffer.d || "") + m;
},
'rm=': function (buffer, m) {
buffer.rm = (buffer.rm || "") + m;
},
'text=': function (buffer, m) {
buffer.text_ = (buffer.text_ || "") + m;
},
'insert': function (buffer, m, a) {
return {
type_: a
};
},
'insert+p1': function (buffer, m, a) {
return {
type_: a,
p1: m
};
},
'insert+p1+p2': function (buffer, m, a) {
return {
type_: a,
p1: m[0],
p2: m[1]
};
},
'copy': function (buffer, m) {
return m;
},
'rm': function (buffer, m) {
return {
type_: 'rm',
p1: m || ""
};
},
'text': function (buffer, m) {
return mhchemParser.go(m, 'text');
},
'{text}': function (buffer, m) {
var ret = ["{"];
mhchemParser.concatArray(ret, mhchemParser.go(m, 'text'));
ret.push("}");
return ret;
},
'tex-math': function (buffer, m) {
return mhchemParser.go(m, 'tex-math');
},
'tex-math tight': function (buffer, m) {
return mhchemParser.go(m, 'tex-math tight');
},
'bond': function (buffer, m, k) {
return {
type_: 'bond',
kind_: k || m
};
},
'color0-output': function (buffer, m) {
return {
type_: 'color0',
color: m[0]
};
},
'ce': function (buffer, m) {
return mhchemParser.go(m);
},
'1/2': function (buffer, m) {
/** @type {ParserOutput[]} */
var ret = [];
if (m.match(/^[+\-]/)) {
ret.push(m.substr(0, 1));
m = m.substr(1);
}
var n = m.match(/^([0-9]+|\$[a-z]\$|[a-z])\/([0-9]+)(\$[a-z]\$|[a-z])?$/);
n[1] = n[1].replace(/\$/g, "");
ret.push({
type_: 'frac',
p1: n[1],
p2: n[2]
});
if (n[3]) {
n[3] = n[3].replace(/\$/g, "");
ret.push({
type_: 'tex-math',
p1: n[3]
});
}
return ret;
},
'9,9': function (buffer, m) {
return mhchemParser.go(m, '9,9');
}
},
//
// createTransitions
// convert { 'letter': { 'state': { action_: 'output' } } } to { 'state' => [ { pattern: 'letter', task: { action_: [{type_: 'output'}] } } ] }
// with expansion of 'a|b' to 'a' and 'b' (at 2 places)
//
createTransitions: function (o) {
var pattern, state;
/** @type {string[]} */
var stateArray;
var i; //
// 1. Collect all states
//
/** @type {Transitions} */
var transitions = {};
for (pattern in o) {
for (state in o[pattern]) {
stateArray = state.split("|");
o[pattern][state].stateArray = stateArray;
for (i = 0; i < stateArray.length; i++) {
transitions[stateArray[i]] = [];
}
}
} //
// 2. Fill states
//
for (pattern in o) {
for (state in o[pattern]) {
stateArray = o[pattern][state].stateArray || [];
for (i = 0; i < stateArray.length; i++) {
//
// 2a. Normalize actions into array: 'text=' ==> [{type_:'text='}]
// (Note to myself: Resolving the function here would be problematic. It would need .bind (for *this*) and currying (for *option*).)
//
/** @type {any} */
var p = o[pattern][state];
if (p.action_) {
p.action_ = [].concat(p.action_);
for (var k = 0; k < p.action_.length; k++) {
if (typeof p.action_[k] === "string") {
p.action_[k] = {
type_: p.action_[k]
};
}
}
} else {
p.action_ = [];
} //
// 2.b Multi-insert
//
var patternArray = pattern.split("|");
for (var j = 0; j < patternArray.length; j++) {
if (stateArray[i] === '*') {
// insert into all
for (var t in transitions) {
transitions[t].push({
pattern: patternArray[j],
task: p
});
}
} else {
transitions[stateArray[i]].push({
pattern: patternArray[j],
task: p
});
}
}
}
}
}
return transitions;
},
stateMachines: {}
}; //
// Definition of state machines
//
mhchemParser.stateMachines = {
//
// \ce state machines
//
//#region ce
'ce': {
// main parser
transitions: mhchemParser.createTransitions({
'empty': {
'*': {
action_: 'output'
}
},
'else': {
'0|1|2': {
action_: 'beginsWithBond=false',
revisit: true,
toContinue: true
}
},
'oxidation$': {
'0': {
action_: 'oxidation-output'
}
},
'CMT': {
'r': {
action_: 'rdt=',
nextState: 'rt'
},
'rd': {
action_: 'rqt=',
nextState: 'rdt'
}
},
'arrowUpDown': {
'0|1|2|as': {
action_: ['sb=false', 'output', 'operator'],
nextState: '1'
}
},
'uprightEntities': {
'0|1|2': {
action_: ['o=', 'output'],
nextState: '1'
}
},
'orbital': {
'0|1|2|3': {
action_: 'o=',
nextState: 'o'
}
},
'->': {
'0|1|2|3': {
action_: 'r=',
nextState: 'r'
},
'a|as': {
action_: ['output', 'r='],
nextState: 'r'
},
'*': {
action_: ['output', 'r='],
nextState: 'r'
}
},
'+': {
'o': {
action_: 'd= kv',
nextState: 'd'
},
'd|D': {
action_: 'd=',
nextState: 'd'
},
'q': {
action_: 'd=',
nextState: 'qd'
},
'qd|qD': {
action_: 'd=',
nextState: 'qd'
},
'dq': {
action_: ['output', 'd='],
nextState: 'd'
},
'3': {
action_: ['sb=false', 'output', 'operator'],
nextState: '0'
}
},
'amount': {
'0|2': {
action_: 'a=',
nextState: 'a'
}
},
'pm-operator': {
'0|1|2|a|as': {
action_: ['sb=false', 'output', {
type_: 'operator',
option: '\\pm'
}],
nextState: '0'
}
},
'operator': {
'0|1|2|a|as': {
action_: ['sb=false', 'output', 'operator'],
nextState: '0'
}
},
'-$': {
'o|q': {
action_: ['charge or bond', 'output'],
nextState: 'qd'
},
'd': {
action_: 'd=',
nextState: 'd'
},
'D': {
action_: ['output', {
type_: 'bond',
option: "-"
}],
nextState: '3'
},
'q': {
action_: 'd=',
nextState: 'qd'
},
'qd': {
action_: 'd=',
nextState: 'qd'
},
'qD|dq': {
action_: ['output', {
type_: 'bond',
option: "-"
}],
nextState: '3'
}
},
'-9': {
'3|o': {
action_: ['output', {
type_: 'insert',
option: 'hyphen'
}],
nextState: '3'
}
},
'- orbital overlap': {
'o': {
action_: ['output', {
type_: 'insert',
option: 'hyphen'
}],
nextState: '2'
},
'd': {
action_: ['output', {
type_: 'insert',
option: 'hyphen'
}],
nextState: '2'
}
},
'-': {
'0|1|2': {
action_: [{
type_: 'output',
option: 1
}, 'beginsWithBond=true', {
type_: 'bond',
option: "-"
}],
nextState: '3'
},
'3': {
action_: {
type_: 'bond',
option: "-"
}
},
'a': {
action_: ['output', {
type_: 'insert',
option: 'hyphen'
}],
nextState: '2'
},
'as': {
action_: [{
type_: 'output',
option: 2
}, {
type_: 'bond',
option: "-"
}],
nextState: '3'
},
'b': {
action_: 'b='
},
'o': {
action_: {
type_: '- after o/d',
option: false
},
nextState: '2'
},
'q': {
action_: {
type_: '- after o/d',
option: false
},
nextState: '2'
},
'd|qd|dq': {
action_: {
type_: '- after o/d',
option: true
},
nextState: '2'
},
'D|qD|p': {
action_: ['output', {
type_: 'bond',
option: "-"
}],
nextState: '3'
}
},
'amount2': {
'1|3': {
action_: 'a=',
nextState: 'a'
}
},
'letters': {
'0|1|2|3|a|as|b|p|bp|o': {
action_: 'o=',
nextState: 'o'
},
'q|dq': {
action_: ['output', 'o='],
nextState: 'o'
},
'd|D|qd|qD': {
action_: 'o after d',
nextState: 'o'
}
},
'digits': {
'o': {
action_: 'q=',
nextState: 'q'
},
'd|D': {
action_: 'q=',
nextState: 'dq'
},
'q': {
action_: ['output', 'o='],
nextState: 'o'
},
'a': {
action_: 'o=',
nextState: 'o'
}
},
'space A': {
'b|p|bp': {}
},
'space': {
'a': {
nextState: 'as'
},
'0': {
action_: 'sb=false'
},
'1|2': {
action_: 'sb=true'
},
'r|rt|rd|rdt|rdq': {
action_: 'output',
nextState: '0'
},
'*': {
action_: ['output', 'sb=true'],
nextState: '1'
}
},
'1st-level escape': {
'1|2': {
action_: ['output', {
type_: 'insert+p1',
option: '1st-level escape'
}]
},
'*': {
action_: ['output', {
type_: 'insert+p1',
option: '1st-level escape'
}],
nextState: '0'
}
},
'[(...)]': {
'r|rt': {
action_: 'rd=',
nextState: 'rd'
},
'rd|rdt': {
action_: 'rq=',
nextState: 'rdq'
}
},
'...': {
'o|d|D|dq|qd|qD': {
action_: ['output', {
type_: 'bond',
option: "..."
}],
nextState: '3'
},
'*': {
action_: [{
type_: 'output',
option: 1
}, {
type_: 'insert',
option: 'ellipsis'
}],
nextState: '1'
}
},
'. |* ': {
'*': {
action_: ['output', {
type_: 'insert',
option: 'addition compound'
}],
nextState: '1'
}
},
'state of aggregation $': {
'*': {
action_: ['output', 'state of aggregation'],
nextState: '1'
}
},
'{[(': {
'a|as|o': {
action_: ['o=', 'output', 'parenthesisLevel++'],
nextState: '2'
},
'0|1|2|3': {
action_: ['o=', 'output', 'parenthesisLevel++'],
nextState: '2'
},
'*': {
action_: ['output', 'o=', 'output', 'parenthesisLevel++'],
nextState: '2'
}
},
')]}': {
'0|1|2|3|b|p|bp|o': {
action_: ['o=', 'parenthesisLevel--'],
nextState: 'o'
},
'a|as|d|D|q|qd|qD|dq': {
action_: ['output', 'o=', 'parenthesisLevel--'],
nextState: 'o'
}
},
', ': {
'*': {
action_: ['output', 'comma'],
nextState: '0'
}
},
'^_': {
// ^ and _ without a sensible argument
'*': {}
},
'^{(...)}|^($...$)': {
'0|1|2|as': {
action_: 'b=',
nextState: 'b'
},
'p': {
action_: 'b=',
nextState: 'bp'
},
'3|o': {
action_: 'd= kv',
nextState: 'D'
},
'q': {
action_: 'd=',
nextState: 'qD'
},
'd|D|qd|qD|dq': {
action_: ['output', 'd='],
nextState: 'D'
}
},
'^a|^\\x{}{}|^\\x{}|^\\x|\'': {
'0|1|2|as': {
action_: 'b=',
nextState: 'b'
},
'p': {
action_: 'b=',
nextState: 'bp'
},
'3|o': {
action_: 'd= kv',
nextState: 'd'
},
'q': {
action_: 'd=',
nextState: 'qd'
},
'd|qd|D|qD': {
action_: 'd='
},
'dq': {
action_: ['output', 'd='],
nextState: 'd'
}
},
'_{(state of aggregation)}$': {
'd|D|q|qd|qD|dq': {
action_: ['output', 'q='],
nextState: 'q'
}
},
'_{(...)}|_($...$)|_9|_\\x{}{}|_\\x{}|_\\x': {
'0|1|2|as': {
action_: 'p=',
nextState: 'p'
},
'b': {
action_: 'p=',
nextState: 'bp'
},
'3|o': {
action_: 'q=',
nextState: 'q'
},
'd|D': {
action_: 'q=',
nextState: 'dq'
},
'q|qd|qD|dq': {
action_: ['output', 'q='],
nextState: 'q'
}
},
'=<>': {
'0|1|2|3|a|as|o|q|d|D|qd|qD|dq': {
action_: [{
type_: 'output',
option: 2
}, 'bond'],
nextState: '3'
}
},
'#': {
'0|1|2|3|a|as|o': {
action_: [{
type_: 'output',
option: 2
}, {
type_: 'bond',
option: "#"
}],
nextState: '3'
}
},
'{}': {
'*': {
action_: {
type_: 'output',
option: 1
},
nextState: '1'
}
},
'{...}': {
'0|1|2|3|a|as|b|p|bp': {
action_: 'o=',
nextState: 'o'
},
'o|d|D|q|qd|qD|dq': {
action_: ['output', 'o='],
nextState: 'o'
}
},
'$...$': {
'a': {
action_: 'a='
},
// 2$n$
'0|1|2|3|as|b|p|bp|o': {
action_: 'o=',
nextState: 'o'
},
// not 'amount'
'as|o': {
action_: 'o='
},
'q|d|D|qd|qD|dq': {
action_: ['output', 'o='],
nextState: 'o'
}
},
'\\bond{(...)}': {
'*': {
action_: [{
type_: 'output',
option: 2
}, 'bond'],
nextState: "3"
}
},
'\\frac{(...)}': {
'*': {
action_: [{
type_: 'output',
option: 1
}, 'frac-output'],
nextState: '3'
}
},
'\\overset{(...)}': {
'*': {
action_: [{
type_: 'output',
option: 2
}, 'overset-output'],
nextState: '3'
}
},
'\\underset{(...)}': {
'*': {
action_: [{
type_: 'output',
option: 2
}, 'underset-output'],
nextState: '3'
}
},
'\\underbrace{(...)}': {
'*': {
action_: [{
type_: 'output',
option: 2
}, 'underbrace-output'],
nextState: '3'
}
},
'\\color{(...)}{(...)}1|\\color(...){(...)}2': {
'*': {
action_: [{
type_: 'output',
option: 2
}, 'color-output'],
nextState: '3'
}
},
'\\color{(...)}0': {
'*': {
action_: [{
type_: 'output',
option: 2
}, 'color0-output']
}
},
'\\ce{(...)}': {
'*': {
action_: [{
type_: 'output',
option: 2
}, 'ce'],
nextState: '3'
}
},
'\\,': {
'*': {
action_: [{
type_: 'output',
option: 1
}, 'copy'],
nextState: '1'
}
},
'\\x{}{}|\\x{}|\\x': {
'0|1|2|3|a|as|b|p|bp|o|c0': {
action_: ['o=', 'output'],
nextState: '3'
},
'*': {
action_: ['output', 'o=', 'output'],
nextState: '3'
}
},
'others': {
'*': {
action_: [{
type_: 'output',
option: 1
}, 'copy'],
nextState: '3'
}
},
'else2': {
'a': {
action_: 'a to o',
nextState: 'o',
revisit: true
},
'as': {
action_: ['output', 'sb=true'],
nextState: '1',
revisit: true
},
'r|rt|rd|rdt|rdq': {
action_: ['output'],
nextState: '0',
revisit: true
},
'*': {
action_: ['output', 'copy'],
nextState: '3'
}
}
}),
actions: {
'o after d': function (buffer, m) {
var ret;
if ((buffer.d || "").match(/^[0-9]+$/)) {
var tmp = buffer.d;
buffer.d = undefined;
ret = this['output'](buffer);
buffer.b = tmp;
} else {
ret = this['output'](buffer);
}
mhchemParser.actions['o='](buffer, m);
return ret;
},
'd= kv': function (buffer, m) {
buffer.d = m;
buffer.dType = 'kv';
},
'charge or bond': function (buffer, m) {
if (buffer['beginsWithBond']) {
/** @type {ParserOutput[]} */
var ret = [];
mhchemParser.concatArray(ret, this['output'](buffer));
mhchemParser.concatArray(ret, mhchemParser.actions['bond'](buffer, m, "-"));
return ret;
} else {
buffer.d = m;
}
},
'- after o/d': function (buffer, m, isAfterD) {
var c1 = mhchemParser.patterns.match_('orbital', buffer.o || "");
var c2 = mhchemParser.patterns.match_('one lowercase greek letter $', buffer.o || "");
var c3 = mhchemParser.patterns.match_('one lowercase latin letter $', buffer.o || "");
var c4 = mhchemParser.patterns.match_('$one lowercase latin letter$ $', buffer.o || "");
var hyphenFollows = m === "-" && (c1 && c1.remainder === "" || c2 || c3 || c4);
if (hyphenFollows && !buffer.a && !buffer.b && !buffer.p && !buffer.d && !buffer.q && !c1 && c3) {
buffer.o = '$' + buffer.o + '$';
}
/** @type {ParserOutput[]} */
var ret = [];
if (hyphenFollows) {
mhchemParser.concatArray(ret, this['output'](buffer));
ret.push({
type_: 'hyphen'
});
} else {
c1 = mhchemParser.patterns.match_('digits', buffer.d || "");
if (isAfterD && c1 && c1.remainder === '') {
mhchemParser.concatArray(ret, mhchemParser.actions['d='](buffer, m));
mhchemParser.concatArray(ret, this['output'](buffer));
} else {
mhchemParser.concatArray(ret, this['output'](buffer));
mhchemParser.concatArray(ret, mhchemParser.actions['bond'](buffer, m, "-"));
}
}
return ret;
},
'a to o': function (buffer) {
buffer.o = buffer.a;
buffer.a = undefined;
},
'sb=true': function (buffer) {
buffer.sb = true;
},
'sb=false': function (buffer) {
buffer.sb = false;
},
'beginsWithBond=true': function (buffer) {
buffer['beginsWithBond'] = true;
},
'beginsWithBond=false': function (buffer) {
buffer['beginsWithBond'] = false;
},
'parenthesisLevel++': function (buffer) {
buffer['parenthesisLevel']++;
},
'parenthesisLevel--': function (buffer) {
buffer['parenthesisLevel']--;
},
'state of aggregation': function (buffer, m) {
return {
type_: 'state of aggregation',
p1: mhchemParser.go(m, 'o')
};
},
'comma': function (buffer, m) {
var a = m.replace(/\s*$/, '');
var withSpace = a !== m;
if (withSpace && buffer['parenthesisLevel'] === 0) {
return {
type_: 'comma enumeration L',
p1: a
};
} else {
return {
type_: 'comma enumeration M',
p1: a
};
}
},
'output': function (buffer, m, entityFollows) {
// entityFollows:
// undefined = if we have nothing else to output, also ignore the just read space (buffer.sb)
// 1 = an entity follows, never omit the space if there was one just read before (can only apply to state 1)
// 2 = 1 + the entity can have an amount, so output a\, instead of converting it to o (can only apply to states a|as)
/** @type {ParserOutput | ParserOutput[]} */
var ret;
if (!buffer.r) {
ret = [];
if (!buffer.a && !buffer.b && !buffer.p && !buffer.o && !buffer.q && !buffer.d && !entityFollows) {//ret = [];
} else {
if (buffer.sb) {
ret.push({
type_: 'entitySkip'
});
}
if (!buffer.o && !buffer.q && !buffer.d && !buffer.b && !buffer.p && entityFollows !== 2) {
buffer.o = buffer.a;
buffer.a = undefined;
} else if (!buffer.o && !buffer.q && !buffer.d && (buffer.b || buffer.p)) {
buffer.o = buffer.a;
buffer.d = buffer.b;
buffer.q = buffer.p;
buffer.a = buffer.b = buffer.p = undefined;
} else {
if (buffer.o && buffer.dType === 'kv' && mhchemParser.patterns.match_('d-oxidation$', buffer.d || "")) {
buffer.dType = 'oxidation';
} else if (buffer.o && buffer.dType === 'kv' && !buffer.q) {
buffer.dType = undefined;
}
}
ret.push({
type_: 'chemfive',
a: mhchemParser.go(buffer.a, 'a'),
b: mhchemParser.go(buffer.b, 'bd'),
p: mhchemParser.go(buffer.p, 'pq'),
o: mhchemParser.go(buffer.o, 'o'),
q: mhchemParser.go(buffer.q, 'pq'),
d: mhchemParser.go(buffer.d, buffer.dType === 'oxidation' ? 'oxidation' : 'bd'),
dType: buffer.dType
});
}
} else {
// r
/** @type {ParserOutput[]} */
var rd;
if (buffer.rdt === 'M') {
rd = mhchemParser.go(buffer.rd, 'tex-math');
} else if (buffer.rdt === 'T') {
rd = [{
type_: 'text',
p1: buffer.rd || ""
}];
} else {
rd = mhchemParser.go(buffer.rd);
}
/** @type {ParserOutput[]} */
var rq;
if (buffer.rqt === 'M') {
rq = mhchemParser.go(buffer.rq, 'tex-math');
} else if (buffer.rqt === 'T') {
rq = [{
type_: 'text',
p1: buffer.rq || ""
}];
} else {
rq = mhchemParser.go(buffer.rq);
}
ret = {
type_: 'arrow',
r: buffer.r,
rd: rd,
rq: rq
};
}
for (var p in buffer) {
if (p !== 'parenthesisLevel' && p !== 'beginsWithBond') {
delete buffer[p];
}
}
return ret;
},
'oxidation-output': function (buffer, m) {
var ret = ["{"];
mhchemParser.concatArray(ret, mhchemParser.go(m, 'oxidation'));
ret.push("}");
return ret;
},
'frac-output': function (buffer, m) {
return {
type_: 'frac-ce',
p1: mhchemParser.go(m[0]),
p2: mhchemParser.go(m[1])
};
},
'overset-output': function (buffer, m) {
return {
type_: 'overset',
p1: mhchemParser.go(m[0]),
p2: mhchemParser.go(m[1])
};
},
'underset-output': function (buffer, m) {
return {
type_: 'underset',
p1: mhchemParser.go(m[0]),
p2: mhchemParser.go(m[1])
};
},
'underbrace-output': function (buffer, m) {
return {
type_: 'underbrace',
p1: mhchemParser.go(m[0]),
p2: mhchemParser.go(m[1])
};
},
'color-output': function (buffer, m) {
return {
type_: 'color',
color1: m[0],
color2: mhchemParser.go(m[1])
};
},
'r=': function (buffer, m) {
buffer.r = m;
},
'rdt=': function (buffer, m) {
buffer.rdt = m;
},
'rd=': function (buffer, m) {
buffer.rd = m;
},
'rqt=': function (buffer, m) {
buffer.rqt = m;
},
'rq=': function (buffer, m) {
buffer.rq = m;
},
'operator': function (buffer, m, p1) {
return {
type_: 'operator',
kind_: p1 || m
};
}
}
},
'a': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {}
},
'1/2$': {
'0': {
action_: '1/2'
}
},
'else': {
'0': {
nextState: '1',
revisit: true
}
},
'$(...)$': {
'*': {
action_: 'tex-math tight',
nextState: '1'
}
},
',': {
'*': {
action_: {
type_: 'insert',
option: 'commaDecimal'
}
}
},
'else2': {
'*': {
action_: 'copy'
}
}
}),
actions: {}
},
'o': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {}
},
'1/2$': {
'0': {
action_: '1/2'
}
},
'else': {
'0': {
nextState: '1',
revisit: true
}
},
'letters': {
'*': {
action_: 'rm'
}
},
'\\ca': {
'*': {
action_: {
type_: 'insert',
option: 'circa'
}
}
},
'\\x{}{}|\\x{}|\\x': {
'*': {
action_: 'copy'
}
},
'${(...)}$|$(...)$': {
'*': {
action_: 'tex-math'
}
},
'{(...)}': {
'*': {
action_: '{text}'
}
},
'else2': {
'*': {
action_: 'copy'
}
}
}),
actions: {}
},
'text': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {
action_: 'output'
}
},
'{...}': {
'*': {
action_: 'text='
}
},
'${(...)}$|$(...)$': {
'*': {
action_: 'tex-math'
}
},
'\\greek': {
'*': {
action_: ['output', 'rm']
}
},
'\\,|\\x{}{}|\\x{}|\\x': {
'*': {
action_: ['output', 'copy']
}
},
'else': {
'*': {
action_: 'text='
}
}
}),
actions: {
'output': function (buffer) {
if (buffer.text_) {
/** @type {ParserOutput} */
var ret = {
type_: 'text',
p1: buffer.text_
};
for (var p in buffer) {
delete buffer[p];
}
return ret;
}
}
}
},
'pq': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {}
},
'state of aggregation $': {
'*': {
action_: 'state of aggregation'
}
},
'i$': {
'0': {
nextState: '!f',
revisit: true
}
},
'(KV letters),': {
'0': {
action_: 'rm',
nextState: '0'
}
},
'formula$': {
'0': {
nextState: 'f',
revisit: true
}
},
'1/2$': {
'0': {
action_: '1/2'
}
},
'else': {
'0': {
nextState: '!f',
revisit: true
}
},
'${(...)}$|$(...)$': {
'*': {
action_: 'tex-math'
}
},
'{(...)}': {
'*': {
action_: 'text'
}
},
'a-z': {
'f': {
action_: 'tex-math'
}
},
'letters': {
'*': {
action_: 'rm'
}
},
'-9.,9': {
'*': {
action_: '9,9'
}
},
',': {
'*': {
action_: {
type_: 'insert+p1',
option: 'comma enumeration S'
}
}
},
'\\color{(...)}{(...)}1|\\color(...){(...)}2': {
'*': {
action_: 'color-output'
}
},
'\\color{(...)}0': {
'*': {
action_: 'color0-output'
}
},
'\\ce{(...)}': {
'*': {
action_: 'ce'
}
},
'\\,|\\x{}{}|\\x{}|\\x': {
'*': {
action_: 'copy'
}
},
'else2': {
'*': {
action_: 'copy'
}
}
}),
actions: {
'state of aggregation': function (buffer, m) {
return {
type_: 'state of aggregation subscript',
p1: mhchemParser.go(m, 'o')
};
},
'color-output': function (buffer, m) {
return {
type_: 'color',
color1: m[0],
color2: mhchemParser.go(m[1], 'pq')
};
}
}
},
'bd': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {}
},
'x$': {
'0': {
nextState: '!f',
revisit: true
}
},
'formula$': {
'0': {
nextState: 'f',
revisit: true
}
},
'else': {
'0': {
nextState: '!f',
revisit: true
}
},
'-9.,9 no missing 0': {
'*': {
action_: '9,9'
}
},
'.': {
'*': {
action_: {
type_: 'insert',
option: 'electron dot'
}
}
},
'a-z': {
'f': {
action_: 'tex-math'
}
},
'x': {
'*': {
action_: {
type_: 'insert',
option: 'KV x'
}
}
},
'letters': {
'*': {
action_: 'rm'
}
},
'\'': {
'*': {
action_: {
type_: 'insert',
option: 'prime'
}
}
},
'${(...)}$|$(...)$': {
'*': {
action_: 'tex-math'
}
},
'{(...)}': {
'*': {
action_: 'text'
}
},
'\\color{(...)}{(...)}1|\\color(...){(...)}2': {
'*': {
action_: 'color-output'
}
},
'\\color{(...)}0': {
'*': {
action_: 'color0-output'
}
},
'\\ce{(...)}': {
'*': {
action_: 'ce'
}
},
'\\,|\\x{}{}|\\x{}|\\x': {
'*': {
action_: 'copy'
}
},
'else2': {
'*': {
action_: 'copy'
}
}
}),
actions: {
'color-output': function (buffer, m) {
return {
type_: 'color',
color1: m[0],
color2: mhchemParser.go(m[1], 'bd')
};
}
}
},
'oxidation': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {}
},
'roman numeral': {
'*': {
action_: 'roman-numeral'
}
},
'${(...)}$|$(...)$': {
'*': {
action_: 'tex-math'
}
},
'else': {
'*': {
action_: 'copy'
}
}
}),
actions: {
'roman-numeral': function (buffer, m) {
return {
type_: 'roman numeral',
p1: m || ""
};
}
}
},
'tex-math': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {
action_: 'output'
}
},
'\\ce{(...)}': {
'*': {
action_: ['output', 'ce']
}
},
'{...}|\\,|\\x{}{}|\\x{}|\\x': {
'*': {
action_: 'o='
}
},
'else': {
'*': {
action_: 'o='
}
}
}),
actions: {
'output': function (buffer) {
if (buffer.o) {
/** @type {ParserOutput} */
var ret = {
type_: 'tex-math',
p1: buffer.o
};
for (var p in buffer) {
delete buffer[p];
}
return ret;
}
}
}
},
'tex-math tight': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {
action_: 'output'
}
},
'\\ce{(...)}': {
'*': {
action_: ['output', 'ce']
}
},
'{...}|\\,|\\x{}{}|\\x{}|\\x': {
'*': {
action_: 'o='
}
},
'-|+': {
'*': {
action_: 'tight operator'
}
},
'else': {
'*': {
action_: 'o='
}
}
}),
actions: {
'tight operator': function (buffer, m) {
buffer.o = (buffer.o || "") + "{" + m + "}";
},
'output': function (buffer) {
if (buffer.o) {
/** @type {ParserOutput} */
var ret = {
type_: 'tex-math',
p1: buffer.o
};
for (var p in buffer) {
delete buffer[p];
}
return ret;
}
}
}
},
'9,9': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {}
},
',': {
'*': {
action_: 'comma'
}
},
'else': {
'*': {
action_: 'copy'
}
}
}),
actions: {
'comma': function () {
return {
type_: 'commaDecimal'
};
}
}
},
//#endregion
//
// \pu state machines
//
//#region pu
'pu': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {
action_: 'output'
}
},
'space$': {
'*': {
action_: ['output', 'space']
}
},
'{[(|)]}': {
'0|a': {
action_: 'copy'
}
},
'(-)(9)^(-9)': {
'0': {
action_: 'number^',
nextState: 'a'
}
},
'(-)(9.,9)(e)(99)': {
'0': {
action_: 'enumber',
nextState: 'a'
}
},
'space': {
'0|a': {}
},
'pm-operator': {
'0|a': {
action_: {
type_: 'operator',
option: '\\pm'
},
nextState: '0'
}
},
'operator': {
'0|a': {
action_: 'copy',
nextState: '0'
}
},
'//': {
'd': {
action_: 'o=',
nextState: '/'
}
},
'/': {
'd': {
action_: 'o=',
nextState: '/'
}
},
'{...}|else': {
'0|d': {
action_: 'd=',
nextState: 'd'
},
'a': {
action_: ['space', 'd='],
nextState: 'd'
},
'/|q': {
action_: 'q=',
nextState: 'q'
}
}
}),
actions: {
'enumber': function (buffer, m) {
/** @type {ParserOutput[]} */
var ret = [];
if (m[0] === "+-" || m[0] === "+/-") {
ret.push("\\pm ");
} else if (m[0]) {
ret.push(m[0]);
}
if (m[1]) {
mhchemParser.concatArray(ret, mhchemParser.go(m[1], 'pu-9,9'));
if (m[2]) {
if (m[2].match(/[,.]/)) {
mhchemParser.concatArray(ret, mhchemParser.go(m[2], 'pu-9,9'));
} else {
ret.push(m[2]);
}
}
m[3] = m[4] || m[3];
if (m[3]) {
m[3] = m[3].trim();
if (m[3] === "e" || m[3].substr(0, 1) === "*") {
ret.push({
type_: 'cdot'
});
} else {
ret.push({
type_: 'times'
});
}
}
}
if (m[3]) {
ret.push("10^{" + m[5] + "}");
}
return ret;
},
'number^': function (buffer, m) {
/** @type {ParserOutput[]} */
var ret = [];
if (m[0] === "+-" || m[0] === "+/-") {
ret.push("\\pm ");
} else if (m[0]) {
ret.push(m[0]);
}
mhchemParser.concatArray(ret, mhchemParser.go(m[1], 'pu-9,9'));
ret.push("^{" + m[2] + "}");
return ret;
},
'operator': function (buffer, m, p1) {
return {
type_: 'operator',
kind_: p1 || m
};
},
'space': function () {
return {
type_: 'pu-space-1'
};
},
'output': function (buffer) {
/** @type {ParserOutput | ParserOutput[]} */
var ret;
var md = mhchemParser.patterns.match_('{(...)}', buffer.d || "");
if (md && md.remainder === '') {
buffer.d = md.match_;
}
var mq = mhchemParser.patterns.match_('{(...)}', buffer.q || "");
if (mq && mq.remainder === '') {
buffer.q = mq.match_;
}
if (buffer.d) {
buffer.d = buffer.d.replace(/\u00B0C|\^oC|\^{o}C/g, "{}^{\\circ}C");
buffer.d = buffer.d.replace(/\u00B0F|\^oF|\^{o}F/g, "{}^{\\circ}F");
}
if (buffer.q) {
// fraction
buffer.q = buffer.q.replace(/\u00B0C|\^oC|\^{o}C/g, "{}^{\\circ}C");
buffer.q = buffer.q.replace(/\u00B0F|\^oF|\^{o}F/g, "{}^{\\circ}F");
var b5 = {
d: mhchemParser.go(buffer.d, 'pu'),
q: mhchemParser.go(buffer.q, 'pu')
};
if (buffer.o === '//') {
ret = {
type_: 'pu-frac',
p1: b5.d,
p2: b5.q
};
} else {
ret = b5.d;
if (b5.d.length > 1 || b5.q.length > 1) {
ret.push({
type_: ' / '
});
} else {
ret.push({
type_: '/'
});
}
mhchemParser.concatArray(ret, b5.q);
}
} else {
// no fraction
ret = mhchemParser.go(buffer.d, 'pu-2');
}
for (var p in buffer) {
delete buffer[p];
}
return ret;
}
}
},
'pu-2': {
transitions: mhchemParser.createTransitions({
'empty': {
'*': {
action_: 'output'
}
},
'*': {
'*': {
action_: ['output', 'cdot'],
nextState: '0'
}
},
'\\x': {
'*': {
action_: 'rm='
}
},
'space': {
'*': {
action_: ['output', 'space'],
nextState: '0'
}
},
'^{(...)}|^(-1)': {
'1': {
action_: '^(-1)'
}
},
'-9.,9': {
'0': {
action_: 'rm=',
nextState: '0'
},
'1': {
action_: '^(-1)',
nextState: '0'
}
},
'{...}|else': {
'*': {
action_: 'rm=',
nextState: '1'
}
}
}),
actions: {
'cdot': function () {
return {
type_: 'tight cdot'
};
},
'^(-1)': function (buffer, m) {
buffer.rm += "^{" + m + "}";
},
'space': function () {
return {
type_: 'pu-space-2'
};
},
'output': function (buffer) {
/** @type {ParserOutput | ParserOutput[]} */
var ret = [];
if (buffer.rm) {
var mrm = mhchemParser.patterns.match_('{(...)}', buffer.rm || "");
if (mrm && mrm.remainder === '') {
ret = mhchemParser.go(mrm.match_, 'pu');
} else {
ret = {
type_: 'rm',
p1: buffer.rm
};
}
}
for (var p in buffer) {
delete buffer[p];
}
return ret;
}
}
},
'pu-9,9': {
transitions: mhchemParser.createTransitions({
'empty': {
'0': {
action_: 'output-0'
},
'o': {
action_: 'output-o'
}
},
',': {
'0': {
action_: ['output-0', 'comma'],
nextState: 'o'
}
},
'.': {
'0': {
action_: ['output-0', 'copy'],
nextState: 'o'
}
},
'else': {
'*': {
action_: 'text='
}
}
}),
actions: {
'comma': function () {
return {
type_: 'commaDecimal'
};
},
'output-0': function (buffer) {
/** @type {ParserOutput[]} */
var ret = [];
buffer.text_ = buffer.text_ || "";
if (buffer.text_.length > 4) {
var a = buffer.text_.length % 3;
if (a === 0) {
a = 3;
}
for (var i = buffer.text_.length - 3; i > 0; i -= 3) {
ret.push(buffer.text_.substr(i, 3));
ret.push({
type_: '1000 separator'
});
}
ret.push(buffer.text_.substr(0, a));
ret.reverse();
} else {
ret.push(buffer.text_);
}
for (var p in buffer) {
delete buffer[p];
}
return ret;
},
'output-o': function (buffer) {
/** @type {ParserOutput[]} */
var ret = [];
buffer.text_ = buffer.text_ || "";
if (buffer.text_.length > 4) {
var a = buffer.text_.length - 3;
for (var i = 0; i < a; i += 3) {
ret.push(buffer.text_.substr(i, 3));
ret.push({
type_: '1000 separator'
});
}
ret.push(buffer.text_.substr(i));
} else {
ret.push(buffer.text_);
}
for (var p in buffer) {
delete buffer[p];
}
return ret;
}
}
} //#endregion
}; //
// texify: Take MhchemParser output and convert it to TeX
//
/** @type {Texify} */
var texify = {
go: function (input, isInner) {
// (recursive, max 4 levels)
if (!input) {
return "";
}
var res = "";
var cee = false;
for (var i = 0; i < input.length; i++) {
var inputi = input[i];
if (typeof inputi === "string") {
res += inputi;
} else {
res += texify._go2(inputi);
if (inputi.type_ === '1st-level escape') {
cee = true;
}
}
}
if (!isInner && !cee && res) {
res = "{" + res + "}";
}
return res;
},
_goInner: function (input) {
if (!input) {
return input;
}
return texify.go(input, true);
},
_go2: function (buf) {
/** @type {undefined | string} */
var res;
switch (buf.type_) {
case 'chemfive':
res = "";
var b5 = {
a: texify._goInner(buf.a),
b: texify._goInner(buf.b),
p: texify._goInner(buf.p),
o: texify._goInner(buf.o),
q: texify._goInner(buf.q),
d: texify._goInner(buf.d)
}; //
// a
//
if (b5.a) {
if (b5.a.match(/^[+\-]/)) {
b5.a = "{" + b5.a + "}";
}
res += b5.a + "\\,";
} //
// b and p
//
if (b5.b || b5.p) {
res += "{\\vphantom{X}}";
res += "^{\\hphantom{" + (b5.b || "") + "}}_{\\hphantom{" + (b5.p || "") + "}}";
res += "{\\vphantom{X}}";
res += "^{\\smash[t]{\\vphantom{2}}\\mathllap{" + (b5.b || "") + "}}";
res += "_{\\vphantom{2}\\mathllap{\\smash[t]{" + (b5.p || "") + "}}}";
} //
// o
//
if (b5.o) {
if (b5.o.match(/^[+\-]/)) {
b5.o = "{" + b5.o + "}";
}
res += b5.o;
} //
// q and d
//
if (buf.dType === 'kv') {
if (b5.d || b5.q) {
res += "{\\vphantom{X}}";
}
if (b5.d) {
res += "^{" + b5.d + "}";
}
if (b5.q) {
res += "_{\\smash[t]{" + b5.q + "}}";
}
} else if (buf.dType === 'oxidation') {
if (b5.d) {
res += "{\\vphantom{X}}";
res += "^{" + b5.d + "}";
}
if (b5.q) {
res += "{\\vphantom{X}}";
res += "_{\\smash[t]{" + b5.q + "}}";
}
} else {
if (b5.q) {
res += "{\\vphantom{X}}";
res += "_{\\smash[t]{" + b5.q + "}}";
}
if (b5.d) {
res += "{\\vphantom{X}}";
res += "^{" + b5.d + "}";
}
}
break;
case 'rm':
res = "\\mathrm{" + buf.p1 + "}";
break;
case 'text':
if (buf.p1.match(/[\^_]/)) {
buf.p1 = buf.p1.replace(" ", "~").replace("-", "\\text{-}");
res = "\\mathrm{" + buf.p1 + "}";
} else {
res = "\\text{" + buf.p1 + "}";
}
break;
case 'roman numeral':
res = "\\mathrm{" + buf.p1 + "}";
break;
case 'state of aggregation':
res = "\\mskip2mu " + texify._goInner(buf.p1);
break;
case 'state of aggregation subscript':
res = "\\mskip1mu " + texify._goInner(buf.p1);
break;
case 'bond':
res = texify._getBond(buf.kind_);
if (!res) {
throw ["MhchemErrorBond", "mhchem Error. Unknown bond type (" + buf.kind_ + ")"];
}
break;
case 'frac':
var c = "\\frac{" + buf.p1 + "}{" + buf.p2 + "}";
res = "\\mathchoice{\\textstyle" + c + "}{" + c + "}{" + c + "}{" + c + "}";
break;
case 'pu-frac':
var d = "\\frac{" + texify._goInner(buf.p1) + "}{" + texify._goInner(buf.p2) + "}";
res = "\\mathchoice{\\textstyle" + d + "}{" + d + "}{" + d + "}{" + d + "}";
break;
case 'tex-math':
res = buf.p1 + " ";
break;
case 'frac-ce':
res = "\\frac{" + texify._goInner(buf.p1) + "}{" + texify._goInner(buf.p2) + "}";
break;
case 'overset':
res = "\\overset{" + texify._goInner(buf.p1) + "}{" + texify._goInner(buf.p2) + "}";
break;
case 'underset':
res = "\\underset{" + texify._goInner(buf.p1) + "}{" + texify._goInner(buf.p2) + "}";
break;
case 'underbrace':
res = "\\underbrace{" + texify._goInner(buf.p1) + "}_{" + texify._goInner(buf.p2) + "}";
break;
case 'color':
res = "{\\color{" + buf.color1 + "}{" + texify._goInner(buf.color2) + "}}";
break;
case 'color0':
res = "\\color{" + buf.color + "}";
break;
case 'arrow':
var b6 = {
rd: texify._goInner(buf.rd),
rq: texify._goInner(buf.rq)
};
var arrow = "\\x" + texify._getArrow(buf.r);
if (b6.rq) {
arrow += "[{" + b6.rq + "}]";
}
if (b6.rd) {
arrow += "{" + b6.rd + "}";
} else {
arrow += "{}";
}
res = arrow;
break;
case 'operator':
res = texify._getOperator(buf.kind_);
break;
case '1st-level escape':
res = buf.p1 + " "; // &, \\\\, \\hlin
break;
case 'space':
res = " ";
break;
case 'entitySkip':
res = "~";
break;
case 'pu-space-1':
res = "~";
break;
case 'pu-space-2':
res = "\\mkern3mu ";
break;
case '1000 separator':
res = "\\mkern2mu ";
break;
case 'commaDecimal':
res = "{,}";
break;
case 'comma enumeration L':
res = "{" + buf.p1 + "}\\mkern6mu ";
break;
case 'comma enumeration M':
res = "{" + buf.p1 + "}\\mkern3mu ";
break;
case 'comma enumeration S':
res = "{" + buf.p1 + "}\\mkern1mu ";
break;
case 'hyphen':
res = "\\text{-}";
break;
case 'addition compound':
res = "\\,{\\cdot}\\,";
break;
case 'electron dot':
res = "\\mkern1mu \\bullet\\mkern1mu ";
break;
case 'KV x':
res = "{\\times}";
break;
case 'prime':
res = "\\prime ";
break;
case 'cdot':
res = "\\cdot ";
break;
case 'tight cdot':
res = "\\mkern1mu{\\cdot}\\mkern1mu ";
break;
case 'times':
res = "\\times ";
break;
case 'circa':
res = "{\\sim}";
break;
case '^':
res = "uparrow";
break;
case 'v':
res = "downarrow";
break;
case 'ellipsis':
res = "\\ldots ";
break;
case '/':
res = "/";
break;
case ' / ':
res = "\\,/\\,";
break;
default:
assertNever(buf);
throw ["MhchemBugT", "mhchem bug T. Please report."];
// Missing texify rule or unknown MhchemParser output
}
assertString(res);
return res;
},
_getArrow: function (a) {
switch (a) {
case "->":
return "rightarrow";
case "\u2192":
return "rightarrow";
case "\u27F6":
return "rightarrow";
case "<-":
return "leftarrow";
case "<->":
return "leftrightarrow";
case "<-->":
return "rightleftarrows";
case "<=>":
return "rightleftharpoons";
case "\u21CC":
return "rightleftharpoons";
case "<=>>":
return "rightequilibrium";
case "<<=>":
return "leftequilibrium";
default:
assertNever(a);
throw ["MhchemBugT", "mhchem bug T. Please report."];
}
},
_getBond: function (a) {
switch (a) {
case "-":
return "{-}";
case "1":
return "{-}";
case "=":
return "{=}";
case "2":
return "{=}";
case "#":
return "{\\equiv}";
case "3":
return "{\\equiv}";
case "~":
return "{\\tripledash}";
case "~-":
return "{\\mathrlap{\\raisebox{-.1em}{$-$}}\\raisebox{.1em}{$\\tripledash$}}";
case "~=":
return "{\\mathrlap{\\raisebox{-.2em}{$-$}}\\mathrlap{\\raisebox{.2em}{$\\tripledash$}}-}";
case "~--":
return "{\\mathrlap{\\raisebox{-.2em}{$-$}}\\mathrlap{\\raisebox{.2em}{$\\tripledash$}}-}";
case "-~-":
return "{\\mathrlap{\\raisebox{-.2em}{$-$}}\\mathrlap{\\raisebox{.2em}{$-$}}\\tripledash}";
case "...":
return "{{\\cdot}{\\cdot}{\\cdot}}";
case "....":
return "{{\\cdot}{\\cdot}{\\cdot}{\\cdot}}";
case "->":
return "{\\rightarrow}";
case "<-":
return "{\\leftarrow}";
case "<":
return "{<}";
case ">":
return "{>}";
default:
assertNever(a);
throw ["MhchemBugT", "mhchem bug T. Please report."];
}
},
_getOperator: function (a) {
switch (a) {
case "+":
return " {}+{} ";
case "-":
return " {}-{} ";
case "=":
return " {}={} ";
case "<":
return " {}<{} ";
case ">":
return " {}>{} ";
case "<<":
return " {}\\ll{} ";
case ">>":
return " {}\\gg{} ";
case "\\pm":
return " {}\\pm{} ";
case "\\approx":
return " {}\\approx{} ";
case "$\\approx$":
return " {}\\approx{} ";
case "v":
return " \\downarrow{} ";
case "(v)":
return " \\downarrow{} ";
case "^":
return " \\uparrow{} ";
case "(^)":
return " \\uparrow{} ";
default:
assertNever(a);
throw ["MhchemBugT", "mhchem bug T. Please report."];
}
}
}; //
// Helpers for code analysis
// Will show type error at calling position
//
/** @param {number} a */
function assertNever(a) {}
/** @param {string} a */
function assertString(a) {}
__webpack_exports__ = __webpack_exports__["default"];
/******/ return __webpack_exports__;
/******/ })()
;
});
```
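The mhchem extension above hooks into KaTeX by registering the `\ce` and `\pu` macros, so chemistry markup is translated into plain TeX before KaTeX typesets it. The following is a minimal usage sketch, not a file from this repository: it assumes `katex.min.js` and the mhchem bundle are already loaded on the page, and the element id `equation` is a hypothetical placeholder.

```js
// Minimal usage sketch (assumption: katex.min.js and mhchem.min.js are already loaded).
// The mhchem bundle registers \ce and \pu as KaTeX macros, so they can be used directly.
var target = document.getElementById("equation"); // hypothetical element id
katex.render("\\ce{2H2 + O2 -> 2H2O}", target, { throwOnError: false });

// Alternatively, produce an HTML string without touching the DOM:
var html = katex.renderToString("\\pu{123 kJ//mol}", { throwOnError: false });
```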
## /assets/katex/contrib/mhchem.min.js
```js path="/assets/katex/contrib/mhchem.min.js"
!function(t,e){if("object"==typeof exports&&"object"==typeof module)module.exports=e(require("katex"));else if("function"==typeof define&&define.amd)define(["katex"],e);else{var n="object"==typeof exports?e(require("katex")):e(t.katex);for(var o in n)("object"==typeof exports?exports:t)[o]=n[o]}}("undefined"!=typeof self?self:this,(function(t){return function(){"use strict";var e={757:function(e){e.exports=t}},n={};function o(t){var a=n[t];if(void 0!==a)return a.exports;var r=n[t]={exports:{}};return e[t](r,r.exports,o),r.exports}o.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return o.d(e,{a:e}),e},o.d=function(t,e){for(var n in e)o.o(e,n)&&!o.o(t,n)&&Object.defineProperty(t,n,{enumerable:!0,get:e[n]})},o.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)};var a={},r=o(757),i=o.n(r);i().__defineMacro("\\ce",(function(t){return c(t.consumeArgs(1)[0],"ce")})),i().__defineMacro("\\pu",(function(t){return c(t.consumeArgs(1)[0],"pu")})),i().__defineMacro("\\tripledash","{\\vphantom{-}\\raisebox{2.56mu}{$\\mkern2mu\\tiny\\text{-}\\mkern1mu\\text{-}\\mkern1mu\\text{-}\\mkern2mu$}}");var c=function(t,e){for(var n="",o=t.length&&t[t.length-1].loc.start,a=t.length-1;a>=0;a--)t[a].loc.start>o&&(n+=" ",o=t[a].loc.start),n+=t[a].text,o+=t[a].text.length;return p.go(u.go(n,e))},u={go:function(t,e){if(!t)return[];void 0===e&&(e="ce");var n,o="0",a={};a.parenthesisLevel=0,t=(t=(t=t.replace(/\n/g," ")).replace(/[\u2212\u2013\u2014\u2010]/g,"-")).replace(/[\u2026]/g,"...");for(var r=10,i=[];;){n!==t?(r=10,n=t):r--;var c=u.stateMachines[e],p=c.transitions[o]||c.transitions["*"];t:for(var s=0;s0))return i;if(d.revisit||(t=_.remainder),!d.toContinue)break t}}if(r<=0)throw["MhchemBugU","mhchem bug U. Please report."]}},concatArray:function(t,e){if(e)if(Array.isArray(e))for(var n=0;n":/^[=<>]/,"#":/^[#\u2261]/,"+":/^\+/,"-$":/^-(?=[\s_},;\]/]|$|\([a-z]+\))/,"-9":/^-(?=[0-9])/,"- orbital overlap":/^-(?=(?:[spd]|sp)(?:$|[\s,;\)\]\}]))/,"-":/^-/,"pm-operator":/^(?:\\pm|\$\\pm\$|\+-|\+\/-)/,operator:/^(?:\+|(?:[\-=<>]|<<|>>|\\approx|\$\\approx\$)(?=\s|$|-?[0-9]))/,arrowUpDown:/^(?:v|\(v\)|\^|\(\^\))(?=$|[\s,;\)\]\}])/,"\\bond{(...)}":function(t){return u.patterns.findObserveGroups(t,"\\bond{","","","}")},"->":/^(?:<->|<-->|->|<-|<=>>|<<=>|<=>|[\u2192\u27F6\u21CC])/,CMT:/^[CMT](?=\[)/,"[(...)]":function(t){return u.patterns.findObserveGroups(t,"[","","","]")},"1st-level escape":/^(&|\\\\|\\hline)\s*/,"\\,":/^(?:\\[,\ ;:])/,"\\x{}{}":function(t){return u.patterns.findObserveGroups(t,"",/^\\[a-zA-Z]+\{/,"}","","","{","}","",!0)},"\\x{}":function(t){return u.patterns.findObserveGroups(t,"",/^\\[a-zA-Z]+\{/,"}","")},"\\ca":/^\\ca(?:\s+|(?![a-zA-Z]))/,"\\x":/^(?:\\[a-zA-Z]+\s*|\\[_&{}%])/,orbital:/^(?:[0-9]{1,2}[spdfgh]|[0-9]{0,2}sp)(?=$|[^a-zA-Z])/,others:/^[\/~|]/,"\\frac{(...)}":function(t){return u.patterns.findObserveGroups(t,"\\frac{","","","}","{","","","}")},"\\overset{(...)}":function(t){return u.patterns.findObserveGroups(t,"\\overset{","","","}","{","","","}")},"\\underset{(...)}":function(t){return u.patterns.findObserveGroups(t,"\\underset{","","","}","{","","","}")},"\\underbrace{(...)}":function(t){return u.patterns.findObserveGroups(t,"\\underbrace{","","","}_","{","","","}")},"\\color{(...)}0":function(t){return u.patterns.findObserveGroups(t,"\\color{","","","}")},"\\color{(...)}{(...)}1":function(t){return u.patterns.findObserveGroups(t,"\\color{","","","}","{","","","}")},"\\color(...){(...)}2":function(t){return 
u.patterns.findObserveGroups(t,"\\color","\\","",/^(?=\{)/,"{","","","}")},"\\ce{(...)}":function(t){return u.patterns.findObserveGroups(t,"\\ce{","","","}")},oxidation$:/^(?:[+-][IVX]+|\\pm\s*0|\$\\pm\$\s*0)$/,"d-oxidation$":/^(?:[+-]?\s?[IVX]+|\\pm\s*0|\$\\pm\$\s*0)$/,"roman numeral":/^[IVX]+/,"1/2$":/^[+\-]?(?:[0-9]+|\$[a-z]\$|[a-z])\/[0-9]+(?:\$[a-z]\$|[a-z])?$/,amount:function(t){var e;if(e=t.match(/^(?:(?:(?:\([+\-]?[0-9]+\/[0-9]+\)|[+\-]?(?:[0-9]+|\$[a-z]\$|[a-z])\/[0-9]+|[+\-]?[0-9]+[.,][0-9]+|[+\-]?\.[0-9]+|[+\-]?[0-9]+)(?:[a-z](?=\s*[A-Z]))?)|[+\-]?[a-z](?=\s*[A-Z])|\+(?!\s))/))return{match_:e[0],remainder:t.substr(e[0].length)};var n=u.patterns.findObserveGroups(t,"","$","$","");return n&&(e=n.match_.match(/^\$(?:\(?[+\-]?(?:[0-9]*[a-z]?[+\-])?[0-9]*[a-z](?:[+\-][0-9]*[a-z]?)?\)?|\+|-)\$$/))?{match_:e[0],remainder:t.substr(e[0].length)}:null},amount2:function(t){return this.amount(t)},"(KV letters),":/^(?:[A-Z][a-z]{0,2}|i)(?=,)/,formula$:function(t){if(t.match(/^\([a-z]+\)$/))return null;var e=t.match(/^(?:[a-z]|(?:[0-9\ \+\-\,\.\(\)]+[a-z])+[0-9\ \+\-\,\.\(\)]*|(?:[a-z][0-9\ \+\-\,\.\(\)]+)+[a-z]?)$/);return e?{match_:e[0],remainder:t.substr(e[0].length)}:null},uprightEntities:/^(?:pH|pOH|pC|pK|iPr|iBu)(?=$|[^a-zA-Z])/,"/":/^\s*(\/)\s*/,"//":/^\s*(\/\/)\s*/,"*":/^\s*[*.]\s*/},findObserveGroups:function(t,e,n,o,a,r,i,c,u,p){var s=function(t,e){if("string"==typeof e)return 0!==t.indexOf(e)?null:e;var n=t.match(e);return n?n[0]:null},_=s(t,e);if(null===_)return null;if(t=t.substr(_.length),null===(_=s(t,n)))return null;var d=function(t,e,n){for(var o=0;e":{"0|1|2|3":{action_:"r=",nextState:"r"},"a|as":{action_:["output","r="],nextState:"r"},"*":{action_:["output","r="],nextState:"r"}},"+":{o:{action_:"d= kv",nextState:"d"},"d|D":{action_:"d=",nextState:"d"},q:{action_:"d=",nextState:"qd"},"qd|qD":{action_:"d=",nextState:"qd"},dq:{action_:["output","d="],nextState:"d"},3:{action_:["sb=false","output","operator"],nextState:"0"}},amount:{"0|2":{action_:"a=",nextState:"a"}},"pm-operator":{"0|1|2|a|as":{action_:["sb=false","output",{type_:"operator",option:"\\pm"}],nextState:"0"}},operator:{"0|1|2|a|as":{action_:["sb=false","output","operator"],nextState:"0"}},"-$":{"o|q":{action_:["charge or bond","output"],nextState:"qd"},d:{action_:"d=",nextState:"d"},D:{action_:["output",{type_:"bond",option:"-"}],nextState:"3"},q:{action_:"d=",nextState:"qd"},qd:{action_:"d=",nextState:"qd"},"qD|dq":{action_:["output",{type_:"bond",option:"-"}],nextState:"3"}},"-9":{"3|o":{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"3"}},"- orbital overlap":{o:{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"2"},d:{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"2"}},"-":{"0|1|2":{action_:[{type_:"output",option:1},"beginsWithBond=true",{type_:"bond",option:"-"}],nextState:"3"},3:{action_:{type_:"bond",option:"-"}},a:{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"2"},as:{action_:[{type_:"output",option:2},{type_:"bond",option:"-"}],nextState:"3"},b:{action_:"b="},o:{action_:{type_:"- after o/d",option:!1},nextState:"2"},q:{action_:{type_:"- after o/d",option:!1},nextState:"2"},"d|qd|dq":{action_:{type_:"- after o/d",option:!0},nextState:"2"},"D|qD|p":{action_:["output",{type_:"bond",option:"-"}],nextState:"3"}},amount2:{"1|3":{action_:"a=",nextState:"a"}},letters:{"0|1|2|3|a|as|b|p|bp|o":{action_:"o=",nextState:"o"},"q|dq":{action_:["output","o="],nextState:"o"},"d|D|qd|qD":{action_:"o after 
d",nextState:"o"}},digits:{o:{action_:"q=",nextState:"q"},"d|D":{action_:"q=",nextState:"dq"},q:{action_:["output","o="],nextState:"o"},a:{action_:"o=",nextState:"o"}},"space A":{"b|p|bp":{}},space:{a:{nextState:"as"},0:{action_:"sb=false"},"1|2":{action_:"sb=true"},"r|rt|rd|rdt|rdq":{action_:"output",nextState:"0"},"*":{action_:["output","sb=true"],nextState:"1"}},"1st-level escape":{"1|2":{action_:["output",{type_:"insert+p1",option:"1st-level escape"}]},"*":{action_:["output",{type_:"insert+p1",option:"1st-level escape"}],nextState:"0"}},"[(...)]":{"r|rt":{action_:"rd=",nextState:"rd"},"rd|rdt":{action_:"rq=",nextState:"rdq"}},"...":{"o|d|D|dq|qd|qD":{action_:["output",{type_:"bond",option:"..."}],nextState:"3"},"*":{action_:[{type_:"output",option:1},{type_:"insert",option:"ellipsis"}],nextState:"1"}},". |* ":{"*":{action_:["output",{type_:"insert",option:"addition compound"}],nextState:"1"}},"state of aggregation $":{"*":{action_:["output","state of aggregation"],nextState:"1"}},"{[(":{"a|as|o":{action_:["o=","output","parenthesisLevel++"],nextState:"2"},"0|1|2|3":{action_:["o=","output","parenthesisLevel++"],nextState:"2"},"*":{action_:["output","o=","output","parenthesisLevel++"],nextState:"2"}},")]}":{"0|1|2|3|b|p|bp|o":{action_:["o=","parenthesisLevel--"],nextState:"o"},"a|as|d|D|q|qd|qD|dq":{action_:["output","o=","parenthesisLevel--"],nextState:"o"}},", ":{"*":{action_:["output","comma"],nextState:"0"}},"^_":{"*":{}},"^{(...)}|^($...$)":{"0|1|2|as":{action_:"b=",nextState:"b"},p:{action_:"b=",nextState:"bp"},"3|o":{action_:"d= kv",nextState:"D"},q:{action_:"d=",nextState:"qD"},"d|D|qd|qD|dq":{action_:["output","d="],nextState:"D"}},"^a|^\\x{}{}|^\\x{}|^\\x|'":{"0|1|2|as":{action_:"b=",nextState:"b"},p:{action_:"b=",nextState:"bp"},"3|o":{action_:"d= kv",nextState:"d"},q:{action_:"d=",nextState:"qd"},"d|qd|D|qD":{action_:"d="},dq:{action_:["output","d="],nextState:"d"}},"_{(state of 
aggregation)}$":{"d|D|q|qd|qD|dq":{action_:["output","q="],nextState:"q"}},"_{(...)}|_($...$)|_9|_\\x{}{}|_\\x{}|_\\x":{"0|1|2|as":{action_:"p=",nextState:"p"},b:{action_:"p=",nextState:"bp"},"3|o":{action_:"q=",nextState:"q"},"d|D":{action_:"q=",nextState:"dq"},"q|qd|qD|dq":{action_:["output","q="],nextState:"q"}},"=<>":{"0|1|2|3|a|as|o|q|d|D|qd|qD|dq":{action_:[{type_:"output",option:2},"bond"],nextState:"3"}},"#":{"0|1|2|3|a|as|o":{action_:[{type_:"output",option:2},{type_:"bond",option:"#"}],nextState:"3"}},"{}":{"*":{action_:{type_:"output",option:1},nextState:"1"}},"{...}":{"0|1|2|3|a|as|b|p|bp":{action_:"o=",nextState:"o"},"o|d|D|q|qd|qD|dq":{action_:["output","o="],nextState:"o"}},"$...$":{a:{action_:"a="},"0|1|2|3|as|b|p|bp|o":{action_:"o=",nextState:"o"},"as|o":{action_:"o="},"q|d|D|qd|qD|dq":{action_:["output","o="],nextState:"o"}},"\\bond{(...)}":{"*":{action_:[{type_:"output",option:2},"bond"],nextState:"3"}},"\\frac{(...)}":{"*":{action_:[{type_:"output",option:1},"frac-output"],nextState:"3"}},"\\overset{(...)}":{"*":{action_:[{type_:"output",option:2},"overset-output"],nextState:"3"}},"\\underset{(...)}":{"*":{action_:[{type_:"output",option:2},"underset-output"],nextState:"3"}},"\\underbrace{(...)}":{"*":{action_:[{type_:"output",option:2},"underbrace-output"],nextState:"3"}},"\\color{(...)}{(...)}1|\\color(...){(...)}2":{"*":{action_:[{type_:"output",option:2},"color-output"],nextState:"3"}},"\\color{(...)}0":{"*":{action_:[{type_:"output",option:2},"color0-output"]}},"\\ce{(...)}":{"*":{action_:[{type_:"output",option:2},"ce"],nextState:"3"}},"\\,":{"*":{action_:[{type_:"output",option:1},"copy"],nextState:"1"}},"\\x{}{}|\\x{}|\\x":{"0|1|2|3|a|as|b|p|bp|o|c0":{action_:["o=","output"],nextState:"3"},"*":{action_:["output","o=","output"],nextState:"3"}},others:{"*":{action_:[{type_:"output",option:1},"copy"],nextState:"3"}},else2:{a:{action_:"a to o",nextState:"o",revisit:!0},as:{action_:["output","sb=true"],nextState:"1",revisit:!0},"r|rt|rd|rdt|rdq":{action_:["output"],nextState:"0",revisit:!0},"*":{action_:["output","copy"],nextState:"3"}}}),actions:{"o after d":function(t,e){var n;if((t.d||"").match(/^[0-9]+$/)){var o=t.d;t.d=void 0,n=this.output(t),t.b=o}else n=this.output(t);return u.actions["o="](t,e),n},"d= kv":function(t,e){t.d=e,t.dType="kv"},"charge or bond":function(t,e){if(t.beginsWithBond){var n=[];return u.concatArray(n,this.output(t)),u.concatArray(n,u.actions.bond(t,e,"-")),n}t.d=e},"- after o/d":function(t,e,n){var o=u.patterns.match_("orbital",t.o||""),a=u.patterns.match_("one lowercase greek letter $",t.o||""),r=u.patterns.match_("one lowercase latin letter $",t.o||""),i=u.patterns.match_("$one lowercase latin letter$ $",t.o||""),c="-"===e&&(o&&""===o.remainder||a||r||i);!c||t.a||t.b||t.p||t.d||t.q||o||!r||(t.o="$"+t.o+"$");var p=[];return c?(u.concatArray(p,this.output(t)),p.push({type_:"hyphen"})):(o=u.patterns.match_("digits",t.d||""),n&&o&&""===o.remainder?(u.concatArray(p,u.actions["d="](t,e)),u.concatArray(p,this.output(t))):(u.concatArray(p,this.output(t)),u.concatArray(p,u.actions.bond(t,e,"-")))),p},"a to o":function(t){t.o=t.a,t.a=void 0},"sb=true":function(t){t.sb=!0},"sb=false":function(t){t.sb=!1},"beginsWithBond=true":function(t){t.beginsWithBond=!0},"beginsWithBond=false":function(t){t.beginsWithBond=!1},"parenthesisLevel++":function(t){t.parenthesisLevel++},"parenthesisLevel--":function(t){t.parenthesisLevel--},"state of aggregation":function(t,e){return{type_:"state of aggregation",p1:u.go(e,"o")}},comma:function(t,e){var 
n=e.replace(/\s*$/,"");return n!==e&&0===t.parenthesisLevel?{type_:"comma enumeration L",p1:n}:{type_:"comma enumeration M",p1:n}},output:function(t,e,n){var o,a,r;t.r?(a="M"===t.rdt?u.go(t.rd,"tex-math"):"T"===t.rdt?[{type_:"text",p1:t.rd||""}]:u.go(t.rd),r="M"===t.rqt?u.go(t.rq,"tex-math"):"T"===t.rqt?[{type_:"text",p1:t.rq||""}]:u.go(t.rq),o={type_:"arrow",r:t.r,rd:a,rq:r}):(o=[],(t.a||t.b||t.p||t.o||t.q||t.d||n)&&(t.sb&&o.push({type_:"entitySkip"}),t.o||t.q||t.d||t.b||t.p||2===n?t.o||t.q||t.d||!t.b&&!t.p?t.o&&"kv"===t.dType&&u.patterns.match_("d-oxidation$",t.d||"")?t.dType="oxidation":t.o&&"kv"===t.dType&&!t.q&&(t.dType=void 0):(t.o=t.a,t.d=t.b,t.q=t.p,t.a=t.b=t.p=void 0):(t.o=t.a,t.a=void 0),o.push({type_:"chemfive",a:u.go(t.a,"a"),b:u.go(t.b,"bd"),p:u.go(t.p,"pq"),o:u.go(t.o,"o"),q:u.go(t.q,"pq"),d:u.go(t.d,"oxidation"===t.dType?"oxidation":"bd"),dType:t.dType})));for(var i in t)"parenthesisLevel"!==i&&"beginsWithBond"!==i&&delete t[i];return o},"oxidation-output":function(t,e){var n=["{"];return u.concatArray(n,u.go(e,"oxidation")),n.push("}"),n},"frac-output":function(t,e){return{type_:"frac-ce",p1:u.go(e[0]),p2:u.go(e[1])}},"overset-output":function(t,e){return{type_:"overset",p1:u.go(e[0]),p2:u.go(e[1])}},"underset-output":function(t,e){return{type_:"underset",p1:u.go(e[0]),p2:u.go(e[1])}},"underbrace-output":function(t,e){return{type_:"underbrace",p1:u.go(e[0]),p2:u.go(e[1])}},"color-output":function(t,e){return{type_:"color",color1:e[0],color2:u.go(e[1])}},"r=":function(t,e){t.r=e},"rdt=":function(t,e){t.rdt=e},"rd=":function(t,e){t.rd=e},"rqt=":function(t,e){t.rqt=e},"rq=":function(t,e){t.rq=e},operator:function(t,e,n){return{type_:"operator",kind_:n||e}}}},a:{transitions:u.createTransitions({empty:{"*":{}},"1/2$":{0:{action_:"1/2"}},else:{0:{nextState:"1",revisit:!0}},"$(...)$":{"*":{action_:"tex-math tight",nextState:"1"}},",":{"*":{action_:{type_:"insert",option:"commaDecimal"}}},else2:{"*":{action_:"copy"}}}),actions:{}},o:{transitions:u.createTransitions({empty:{"*":{}},"1/2$":{0:{action_:"1/2"}},else:{0:{nextState:"1",revisit:!0}},letters:{"*":{action_:"rm"}},"\\ca":{"*":{action_:{type_:"insert",option:"circa"}}},"\\x{}{}|\\x{}|\\x":{"*":{action_:"copy"}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"{(...)}":{"*":{action_:"{text}"}},else2:{"*":{action_:"copy"}}}),actions:{}},text:{transitions:u.createTransitions({empty:{"*":{action_:"output"}},"{...}":{"*":{action_:"text="}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"\\greek":{"*":{action_:["output","rm"]}},"\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:["output","copy"]}},else:{"*":{action_:"text="}}}),actions:{output:function(t){if(t.text_){var e={type_:"text",p1:t.text_};for(var n in t)delete t[n];return e}}}},pq:{transitions:u.createTransitions({empty:{"*":{}},"state of aggregation $":{"*":{action_:"state of aggregation"}},i$:{0:{nextState:"!f",revisit:!0}},"(KV letters),":{0:{action_:"rm",nextState:"0"}},formula$:{0:{nextState:"f",revisit:!0}},"1/2$":{0:{action_:"1/2"}},else:{0:{nextState:"!f",revisit:!0}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"{(...)}":{"*":{action_:"text"}},"a-z":{f:{action_:"tex-math"}},letters:{"*":{action_:"rm"}},"-9.,9":{"*":{action_:"9,9"}},",":{"*":{action_:{type_:"insert+p1",option:"comma enumeration S"}}},"\\color{(...)}{(...)}1|\\color(...){(...)}2":{"*":{action_:"color-output"}},"\\color{(...)}0":{"*":{action_:"color0-output"}},"\\ce{(...)}":{"*":{action_:"ce"}},"\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"copy"}},else2:{"*":{action_:"copy"}}}),actions:{"state of 
aggregation":function(t,e){return{type_:"state of aggregation subscript",p1:u.go(e,"o")}},"color-output":function(t,e){return{type_:"color",color1:e[0],color2:u.go(e[1],"pq")}}}},bd:{transitions:u.createTransitions({empty:{"*":{}},x$:{0:{nextState:"!f",revisit:!0}},formula$:{0:{nextState:"f",revisit:!0}},else:{0:{nextState:"!f",revisit:!0}},"-9.,9 no missing 0":{"*":{action_:"9,9"}},".":{"*":{action_:{type_:"insert",option:"electron dot"}}},"a-z":{f:{action_:"tex-math"}},x:{"*":{action_:{type_:"insert",option:"KV x"}}},letters:{"*":{action_:"rm"}},"'":{"*":{action_:{type_:"insert",option:"prime"}}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"{(...)}":{"*":{action_:"text"}},"\\color{(...)}{(...)}1|\\color(...){(...)}2":{"*":{action_:"color-output"}},"\\color{(...)}0":{"*":{action_:"color0-output"}},"\\ce{(...)}":{"*":{action_:"ce"}},"\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"copy"}},else2:{"*":{action_:"copy"}}}),actions:{"color-output":function(t,e){return{type_:"color",color1:e[0],color2:u.go(e[1],"bd")}}}},oxidation:{transitions:u.createTransitions({empty:{"*":{}},"roman numeral":{"*":{action_:"roman-numeral"}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},else:{"*":{action_:"copy"}}}),actions:{"roman-numeral":function(t,e){return{type_:"roman numeral",p1:e||""}}}},"tex-math":{transitions:u.createTransitions({empty:{"*":{action_:"output"}},"\\ce{(...)}":{"*":{action_:["output","ce"]}},"{...}|\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"o="}},else:{"*":{action_:"o="}}}),actions:{output:function(t){if(t.o){var e={type_:"tex-math",p1:t.o};for(var n in t)delete t[n];return e}}}},"tex-math tight":{transitions:u.createTransitions({empty:{"*":{action_:"output"}},"\\ce{(...)}":{"*":{action_:["output","ce"]}},"{...}|\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"o="}},"-|+":{"*":{action_:"tight operator"}},else:{"*":{action_:"o="}}}),actions:{"tight operator":function(t,e){t.o=(t.o||"")+"{"+e+"}"},output:function(t){if(t.o){var e={type_:"tex-math",p1:t.o};for(var n in t)delete t[n];return e}}}},"9,9":{transitions:u.createTransitions({empty:{"*":{}},",":{"*":{action_:"comma"}},else:{"*":{action_:"copy"}}}),actions:{comma:function(){return{type_:"commaDecimal"}}}},pu:{transitions:u.createTransitions({empty:{"*":{action_:"output"}},space$:{"*":{action_:["output","space"]}},"{[(|)]}":{"0|a":{action_:"copy"}},"(-)(9)^(-9)":{0:{action_:"number^",nextState:"a"}},"(-)(9.,9)(e)(99)":{0:{action_:"enumber",nextState:"a"}},space:{"0|a":{}},"pm-operator":{"0|a":{action_:{type_:"operator",option:"\\pm"},nextState:"0"}},operator:{"0|a":{action_:"copy",nextState:"0"}},"//":{d:{action_:"o=",nextState:"/"}},"/":{d:{action_:"o=",nextState:"/"}},"{...}|else":{"0|d":{action_:"d=",nextState:"d"},a:{action_:["space","d="],nextState:"d"},"/|q":{action_:"q=",nextState:"q"}}}),actions:{enumber:function(t,e){var n=[];return"+-"===e[0]||"+/-"===e[0]?n.push("\\pm "):e[0]&&n.push(e[0]),e[1]&&(u.concatArray(n,u.go(e[1],"pu-9,9")),e[2]&&(e[2].match(/[,.]/)?u.concatArray(n,u.go(e[2],"pu-9,9")):n.push(e[2])),e[3]=e[4]||e[3],e[3]&&(e[3]=e[3].trim(),"e"===e[3]||"*"===e[3].substr(0,1)?n.push({type_:"cdot"}):n.push({type_:"times"}))),e[3]&&n.push("10^{"+e[5]+"}"),n},"number^":function(t,e){var n=[];return"+-"===e[0]||"+/-"===e[0]?n.push("\\pm "):e[0]&&n.push(e[0]),u.concatArray(n,u.go(e[1],"pu-9,9")),n.push("^{"+e[2]+"}"),n},operator:function(t,e,n){return{type_:"operator",kind_:n||e}},space:function(){return{type_:"pu-space-1"}},output:function(t){var e,n=u.patterns.match_("{(...)}",t.d||"");n&&""===n.remainder&&(t.d=n.match_);var 
o=u.patterns.match_("{(...)}",t.q||"");if(o&&""===o.remainder&&(t.q=o.match_),t.d&&(t.d=t.d.replace(/\u00B0C|\^oC|\^{o}C/g,"{}^{\\circ}C"),t.d=t.d.replace(/\u00B0F|\^oF|\^{o}F/g,"{}^{\\circ}F")),t.q){t.q=t.q.replace(/\u00B0C|\^oC|\^{o}C/g,"{}^{\\circ}C"),t.q=t.q.replace(/\u00B0F|\^oF|\^{o}F/g,"{}^{\\circ}F");var a={d:u.go(t.d,"pu"),q:u.go(t.q,"pu")};"//"===t.o?e={type_:"pu-frac",p1:a.d,p2:a.q}:(e=a.d,a.d.length>1||a.q.length>1?e.push({type_:" / "}):e.push({type_:"/"}),u.concatArray(e,a.q))}else e=u.go(t.d,"pu-2");for(var r in t)delete t[r];return e}}},"pu-2":{transitions:u.createTransitions({empty:{"*":{action_:"output"}},"*":{"*":{action_:["output","cdot"],nextState:"0"}},"\\x":{"*":{action_:"rm="}},space:{"*":{action_:["output","space"],nextState:"0"}},"^{(...)}|^(-1)":{1:{action_:"^(-1)"}},"-9.,9":{0:{action_:"rm=",nextState:"0"},1:{action_:"^(-1)",nextState:"0"}},"{...}|else":{"*":{action_:"rm=",nextState:"1"}}}),actions:{cdot:function(){return{type_:"tight cdot"}},"^(-1)":function(t,e){t.rm+="^{"+e+"}"},space:function(){return{type_:"pu-space-2"}},output:function(t){var e=[];if(t.rm){var n=u.patterns.match_("{(...)}",t.rm||"");e=n&&""===n.remainder?u.go(n.match_,"pu"):{type_:"rm",p1:t.rm}}for(var o in t)delete t[o];return e}}},"pu-9,9":{transitions:u.createTransitions({empty:{0:{action_:"output-0"},o:{action_:"output-o"}},",":{0:{action_:["output-0","comma"],nextState:"o"}},".":{0:{action_:["output-0","copy"],nextState:"o"}},else:{"*":{action_:"text="}}}),actions:{comma:function(){return{type_:"commaDecimal"}},"output-0":function(t){var e=[];if(t.text_=t.text_||"",t.text_.length>4){var n=t.text_.length%3;0===n&&(n=3);for(var o=t.text_.length-3;o>0;o-=3)e.push(t.text_.substr(o,3)),e.push({type_:"1000 separator"});e.push(t.text_.substr(0,n)),e.reverse()}else e.push(t.text_);for(var a in t)delete t[a];return e},"output-o":function(t){var e=[];if(t.text_=t.text_||"",t.text_.length>4){for(var n=t.text_.length-3,o=0;o":case"\u2192":case"\u27f6":return"rightarrow";case"<-":return"leftarrow";case"<->":return"leftrightarrow";case"<--\x3e":return"rightleftarrows";case"<=>":case"\u21cc":return"rightleftharpoons";case"<=>>":return"rightequilibrium";case"<<=>":return"leftequilibrium";default:throw["MhchemBugT","mhchem bug T. Please report."]}},_getBond:function(t){switch(t){case"-":case"1":return"{-}";case"=":case"2":return"{=}";case"#":case"3":return"{\\equiv}";case"~":return"{\\tripledash}";case"~-":return"{\\mathrlap{\\raisebox{-.1em}{$-$}}\\raisebox{.1em}{$\\tripledash$}}";case"~=":case"~--":return"{\\mathrlap{\\raisebox{-.2em}{$-$}}\\mathrlap{\\raisebox{.2em}{$\\tripledash$}}-}";case"-~-":return"{\\mathrlap{\\raisebox{-.2em}{$-$}}\\mathrlap{\\raisebox{.2em}{$-$}}\\tripledash}";case"...":return"{{\\cdot}{\\cdot}{\\cdot}}";case"....":return"{{\\cdot}{\\cdot}{\\cdot}{\\cdot}}";case"->":return"{\\rightarrow}";case"<-":return"{\\leftarrow}";case"<":return"{<}";case">":return"{>}";default:throw["MhchemBugT","mhchem bug T. Please report."]}},_getOperator:function(t){switch(t){case"+":return" {}+{} ";case"-":return" {}-{} ";case"=":return" {}={} ";case"<":return" {}<{} ";case">":return" {}>{} ";case"<<":return" {}\\ll{} ";case">>":return" {}\\gg{} ";case"\\pm":return" {}\\pm{} ";case"\\approx":case"$\\approx$":return" {}\\approx{} ";case"v":case"(v)":return" \\downarrow{} ";case"^":case"(^)":return" \\uparrow{} ";default:throw["MhchemBugT","mhchem bug T. Please report."]}}};return a=a.default}()}));
```
This snapshot was generated with https://uithub.com and capped at 50,000 tokens, so the remainder of this file and any subsequent files in the repository are omitted.
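The file above (cut off here) is KaTeX's mhchem contrib extension: the parser/state machine that turns `\ce{...}` chemical-equation markup and `\pu{...}` physical-unit markup into ordinary TeX for KaTeX to typeset. For reference only, below is a minimal sketch of how this extension is typically used; it assumes the npm `katex` package rather than the files vendored under `assets/katex/`, so the require paths and options are illustrative and not taken from this repository's code.

```js
// Minimal sketch (assumed Node-style setup with the npm "katex" package,
// not the vendored assets/katex/ files in this repo).
const katex = require("katex");
// Side effect: registers the \ce and \pu macros with KaTeX.
require("katex/contrib/mhchem");

// Render a chemical equation the same way as any other TeX expression.
const html = katex.renderToString("\\ce{CO2 + C -> 2 CO}", {
  throwOnError: false, // show error text instead of throwing on bad input
  displayMode: true    // block (display) math rather than inline
});
console.log(html);     // HTML/MathML markup produced by KaTeX
```

In a browser setup like the one this project vendors, the equivalent is to load `katex.min.js`, then `contrib/mhchem.min.js`, then `contrib/auto-render.min.js`; after that, `renderMathInElement` will typeset `\ce`/`\pu` expressions inside the usual `$...$` and `$$...$$` delimiters.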