config update

This commit is contained in:
gcw_4spBpAfv
2026-03-05 14:41:25 +08:00
parent bd07a7526a
commit ef2bada800
2 changed files with 33 additions and 15 deletions

View File

@@ -73,4 +73,21 @@ object AppConfig {
const val SECRET_ID = "AKIDbBdyBGE5oPuIGA1iDlDYlFallaJ0YODB" // 替换为你的腾讯云SECRET_ID const val SECRET_ID = "AKIDbBdyBGE5oPuIGA1iDlDYlFallaJ0YODB" // 替换为你的腾讯云SECRET_ID
const val SECRET_KEY = "32vhIl9OQIRclmLjvuleLp9LLAnFVYEp" // 替换为你的腾讯云SECRET_KEY const val SECRET_KEY = "32vhIl9OQIRclmLjvuleLp9LLAnFVYEp" // 替换为你的腾讯云SECRET_KEY
} }
// Configuration for downloading and storing the on-device LLM model.
object LLM {
// Base URL of the model download server.
// NOTE(review): plain HTTP to a private LAN address — presumably a dev-only
// server; confirm this is not shipped in release builds.
const val DOWNLOAD_SERVER = "http://192.168.1.19:5000"
// URL path appended to DOWNLOAD_SERVER when requesting files.
const val DOWNLOAD_PATH = "/download"
// File name of the model to download and load.
const val MODEL_FILE_NAME = "Qwen3-0.6B-rk3588-w8a8.rkllm"
// Subdirectory (under the app's internal files directory) where the model is stored.
const val MODEL_DIR = "llm"
// Download connect timeout in milliseconds (600000 ms = 10 min).
// NOTE(review): 10 minutes is unusually long for a *connect* timeout — confirm intentional.
const val DOWNLOAD_CONNECT_TIMEOUT = 600000
// Download read timeout in milliseconds (1200000 ms = 20 min).
const val DOWNLOAD_READ_TIMEOUT = 1200000
// Fallback estimate of the model file size in bytes, used when the server
// does not report a usable Content-Length for progress calculation.
const val MODEL_SIZE_ESTIMATE = 500L * 1024 * 1024 // 500MB
}
} }

View File

@@ -83,13 +83,13 @@ object FileHelper {
// @JvmStatic // @JvmStatic
// 当前使用的模型文件名 // 当前使用的模型文件名
private const val MODEL_FILE_NAME = "Qwen3-0.6B-rk3588-w8a8.rkllm" private val MODEL_FILE_NAME = com.digitalperson.config.AppConfig.LLM.MODEL_FILE_NAME
fun getLLMModelPath(context: Context): String { fun getLLMModelPath(context: Context): String {
Log.d(TAG, "=== getLLMModelPath START ===") Log.d(TAG, "=== getLLMModelPath START ===")
// 从应用内部存储目录加载模型 // 从应用内部存储目录加载模型
val llmDir = ensureDir(File(context.filesDir, "llm")) val llmDir = ensureDir(File(context.filesDir, AppConfig.LLM.MODEL_DIR))
Log.d(TAG, "Loading models from: ${llmDir.absolutePath}") Log.d(TAG, "Loading models from: ${llmDir.absolutePath}")
@@ -122,7 +122,7 @@ object FileHelper {
) { ) {
Log.d(TAG, "=== downloadModelFilesWithProgress START ===") Log.d(TAG, "=== downloadModelFilesWithProgress START ===")
val llmDir = ensureDir(File(context.filesDir, "llm")) val llmDir = ensureDir(File(context.filesDir, AppConfig.LLM.MODEL_DIR))
// 模型文件列表 - 使用 DeepSeek-R1-Distill-Qwen-1.5B 模型 // 模型文件列表 - 使用 DeepSeek-R1-Distill-Qwen-1.5B 模型
val modelFiles = listOf( val modelFiles = listOf(
@@ -137,19 +137,20 @@ object FileHelper {
var totalSize: Long = 0 var totalSize: Long = 0
// 首先计算总大小 // 首先计算总大小
val downloadUrl = AppConfig.LLM.DOWNLOAD_SERVER + AppConfig.LLM.DOWNLOAD_PATH
Log.i(TAG, "Using download server: ${AppConfig.LLM.DOWNLOAD_SERVER}")
for (fileName in modelFiles) { for (fileName in modelFiles) {
val modelFile = File(llmDir, fileName) val modelFile = File(llmDir, fileName)
if (!modelFile.exists() || modelFile.length() == 0L) { if (!modelFile.exists() || modelFile.length() == 0L) {
val size = getFileSizeFromServer("http://192.168.1.19:5000/download/$fileName") val size = getFileSizeFromServer("$downloadUrl/$fileName")
if (size > 0) { if (size > 0) {
totalSize += size totalSize += size
} else { } else {
// 如果无法获取文件大小,使用估计值 // 如果无法获取文件大小,使用估计值
when (fileName) { val estimatedSize = AppConfig.LLM.MODEL_SIZE_ESTIMATE
MODEL_FILE_NAME -> totalSize += 1L * 1024 * 1024 * 1024 // 1.5B模型约1GB totalSize += estimatedSize
else -> totalSize += 1L * 1024 * 1024 * 1024 // 1GB 默认 Log.i(TAG, "Using estimated size for $fileName: ${estimatedSize / (1024*1024)} MB")
}
Log.i(TAG, "Using estimated size for $fileName: ${totalSize / (1024*1024)} MB")
} }
} }
} }
@@ -160,7 +161,7 @@ object FileHelper {
Log.i(TAG, "Downloading model file: $fileName") Log.i(TAG, "Downloading model file: $fileName")
try { try {
downloadFileWithProgress( downloadFileWithProgress(
"http://192.168.1.19:5000/download/$fileName", "$downloadUrl/$fileName",
modelFile modelFile
) { downloaded, total -> ) { downloaded, total ->
val progress = if (totalSize > 0) { val progress = if (totalSize > 0) {
@@ -199,8 +200,8 @@ object FileHelper {
return try { return try {
val connection = java.net.URL(url).openConnection() as java.net.HttpURLConnection val connection = java.net.URL(url).openConnection() as java.net.HttpURLConnection
connection.requestMethod = "HEAD" connection.requestMethod = "HEAD"
connection.connectTimeout = 15000 connection.connectTimeout = AppConfig.LLM.DOWNLOAD_CONNECT_TIMEOUT
connection.readTimeout = 15000 connection.readTimeout = AppConfig.LLM.DOWNLOAD_READ_TIMEOUT
// 从响应头获取 Content-Length避免 int 溢出 // 从响应头获取 Content-Length避免 int 溢出
val contentLengthStr = connection.getHeaderField("Content-Length") val contentLengthStr = connection.getHeaderField("Content-Length")
@@ -245,8 +246,8 @@ object FileHelper {
onProgress: (Long, Long) -> Unit onProgress: (Long, Long) -> Unit
) { ) {
val connection = java.net.URL(url).openConnection() as java.net.HttpURLConnection val connection = java.net.URL(url).openConnection() as java.net.HttpURLConnection
connection.connectTimeout = 30000 connection.connectTimeout = AppConfig.LLM.DOWNLOAD_CONNECT_TIMEOUT
connection.readTimeout = 6000000 connection.readTimeout = AppConfig.LLM.DOWNLOAD_READ_TIMEOUT
// 从响应头获取 Content-Length避免 int 溢出 // 从响应头获取 Content-Length避免 int 溢出
val contentLengthStr = connection.getHeaderField("Content-Length") val contentLengthStr = connection.getHeaderField("Content-Length")
@@ -286,7 +287,7 @@ object FileHelper {
*/ */
@JvmStatic @JvmStatic
fun isLocalLLMAvailable(context: Context): Boolean { fun isLocalLLMAvailable(context: Context): Boolean {
val llmDir = File(context.filesDir, "llm") val llmDir = File(context.filesDir, AppConfig.LLM.MODEL_DIR)
val rkllmFile = File(llmDir, MODEL_FILE_NAME) val rkllmFile = File(llmDir, MODEL_FILE_NAME)