diff --git a/app/src/main/java/com/digitalperson/config/AppConfig.kt b/app/src/main/java/com/digitalperson/config/AppConfig.kt
index f250e1b..0a23f3d 100644
--- a/app/src/main/java/com/digitalperson/config/AppConfig.kt
+++ b/app/src/main/java/com/digitalperson/config/AppConfig.kt
@@ -73,4 +73,21 @@ object AppConfig {
         const val SECRET_ID = "AKIDbBdyBGE5oPuIGA1iDlDYlFallaJ0YODB" // 替换为你的腾讯云SECRET_ID
         const val SECRET_KEY = "32vhIl9OQIRclmLjvuleLp9LLAnFVYEp" // 替换为你的腾讯云SECRET_KEY
     }
+
+    object LLM {
+        // 模型下载服务器地址
+        const val DOWNLOAD_SERVER = "http://192.168.1.19:5000"
+        // 下载路径
+        const val DOWNLOAD_PATH = "/download"
+        // 模型文件名
+        const val MODEL_FILE_NAME = "Qwen3-0.6B-rk3588-w8a8.rkllm"
+        // 模型存储目录
+        const val MODEL_DIR = "llm"
+        // 下载连接超时(毫秒)
+        const val DOWNLOAD_CONNECT_TIMEOUT = 600000
+        // 下载读取超时(毫秒)
+        const val DOWNLOAD_READ_TIMEOUT = 1200000
+        // 模型文件大小估计(字节)
+        const val MODEL_SIZE_ESTIMATE = 500L * 1024 * 1024 // 500MB
+    }
 }
diff --git a/app/src/main/java/com/digitalperson/util/FileHelper.kt b/app/src/main/java/com/digitalperson/util/FileHelper.kt
index b4f81c2..f554b97 100644
--- a/app/src/main/java/com/digitalperson/util/FileHelper.kt
+++ b/app/src/main/java/com/digitalperson/util/FileHelper.kt
@@ -83,13 +83,13 @@ object FileHelper {
     // @JvmStatic
 
     // 当前使用的模型文件名
-    private const val MODEL_FILE_NAME = "Qwen3-0.6B-rk3588-w8a8.rkllm"
+    private val MODEL_FILE_NAME = com.digitalperson.config.AppConfig.LLM.MODEL_FILE_NAME
 
     fun getLLMModelPath(context: Context): String {
         Log.d(TAG, "=== getLLMModelPath START ===")
 
         // 从应用内部存储目录加载模型
-        val llmDir = ensureDir(File(context.filesDir, "llm"))
+        val llmDir = ensureDir(File(context.filesDir, AppConfig.LLM.MODEL_DIR))
 
         Log.d(TAG, "Loading models from: ${llmDir.absolutePath}")
 
@@ -122,7 +122,7 @@ object FileHelper {
     ) {
         Log.d(TAG, "=== downloadModelFilesWithProgress START ===")
 
-        val llmDir = ensureDir(File(context.filesDir, "llm"))
+        val llmDir = ensureDir(File(context.filesDir, AppConfig.LLM.MODEL_DIR))
 
         // 模型文件列表 - 使用 DeepSeek-R1-Distill-Qwen-1.5B 模型
         val modelFiles = listOf(
@@ -137,19 +137,20 @@ object FileHelper {
         var totalSize: Long = 0
 
         // 首先计算总大小
+        val downloadUrl = AppConfig.LLM.DOWNLOAD_SERVER + AppConfig.LLM.DOWNLOAD_PATH
+        Log.i(TAG, "Using download server: ${AppConfig.LLM.DOWNLOAD_SERVER}")
+
         for (fileName in modelFiles) {
             val modelFile = File(llmDir, fileName)
             if (!modelFile.exists() || modelFile.length() == 0L) {
-                val size = getFileSizeFromServer("http://192.168.1.19:5000/download/$fileName")
+                val size = getFileSizeFromServer("$downloadUrl/$fileName")
                 if (size > 0) {
                     totalSize += size
                 } else {
                     // 如果无法获取文件大小,使用估计值
-                    when (fileName) {
-                        MODEL_FILE_NAME -> totalSize += 1L * 1024 * 1024 * 1024 // 1.5B模型约1GB
-                        else -> totalSize += 1L * 1024 * 1024 * 1024 // 1GB 默认
-                    }
-                    Log.i(TAG, "Using estimated size for $fileName: ${totalSize / (1024*1024)} MB")
+                    val estimatedSize = AppConfig.LLM.MODEL_SIZE_ESTIMATE
+                    totalSize += estimatedSize
+                    Log.i(TAG, "Using estimated size for $fileName: ${estimatedSize / (1024*1024)} MB")
                 }
             }
         }
@@ -160,7 +161,7 @@ object FileHelper {
             Log.i(TAG, "Downloading model file: $fileName")
             try {
                 downloadFileWithProgress(
-                    "http://192.168.1.19:5000/download/$fileName",
+                    "$downloadUrl/$fileName",
                     modelFile
                 ) { downloaded, total ->
                     val progress = if (totalSize > 0) {
@@ -199,8 +200,8 @@ object FileHelper {
         return try {
             val connection = java.net.URL(url).openConnection() as java.net.HttpURLConnection
             connection.requestMethod = "HEAD"
-            connection.connectTimeout = 15000
-            connection.readTimeout = 15000
+            connection.connectTimeout = AppConfig.LLM.DOWNLOAD_CONNECT_TIMEOUT
+            connection.readTimeout = AppConfig.LLM.DOWNLOAD_READ_TIMEOUT
 
             // 从响应头获取 Content-Length,避免 int 溢出
             val contentLengthStr = connection.getHeaderField("Content-Length")
@@ -245,8 +246,8 @@ object FileHelper {
         onProgress: (Long, Long) -> Unit
     ) {
         val connection = java.net.URL(url).openConnection() as java.net.HttpURLConnection
-        connection.connectTimeout = 30000
-        connection.readTimeout = 6000000
+        connection.connectTimeout = AppConfig.LLM.DOWNLOAD_CONNECT_TIMEOUT
+        connection.readTimeout = AppConfig.LLM.DOWNLOAD_READ_TIMEOUT
 
         // 从响应头获取 Content-Length,避免 int 溢出
         val contentLengthStr = connection.getHeaderField("Content-Length")
@@ -286,7 +287,7 @@ object FileHelper {
      */
     @JvmStatic
     fun isLocalLLMAvailable(context: Context): Boolean {
-        val llmDir = File(context.filesDir, "llm")
+        val llmDir = File(context.filesDir, AppConfig.LLM.MODEL_DIR)
         val rkllmFile = File(llmDir, MODEL_FILE_NAME)