Mirror of https://github.com/MarSeventh/CloudFlare-ImgBed.git, synced 2026-01-31 09:03:19 +08:00
fix: Fix issues in the index system; update README
README.md (142 lines changed)
@@ -1,8 +1,8 @@
<div align="center">
<a href="https://github.com/MarSeventh/CloudFlare-ImgBed"><img width="80%" alt="logo" src="static/readme/banner.png"/></a>
- <p><em>🗂️ Open-source file hosting solution, supporting Docker and serverless deployment, supporting multiple storage channels such as Telegram Bot, Cloudflare R2, S3, etc.</em></p>
+ <p><em>🗂️ Open-source file hosting solution, supporting Docker and serverless deployment, supporting multiple storage channels such as Telegram Bot, Cloudflare R2, S3, etc.</em> Modified version that replaces KV with D1 storage</p>
<p>
- <a href="https://github.com/MarSeventh/CloudFlare-ImgBed/blob/main/README.md">简体中文</a> | <a href="https://github.com/MarSeventh/CloudFlare-ImgBed/blob/main/README_en.md">English</a> | <a href="https://cfbed.sanyue.de">Official Website</a>
+ <a href="https://github.com/ccxyChuzhong/CloudFlare-ImgBed-D1/blob/main/README.md">简体中文</a> | <a href="https://github.com/ccxyChuzhong/CloudFlare-ImgBed-D1/blob/main/README_en.md">English</a> | <a href="https://github.com/MarSeventh/CloudFlare-ImgBed">KV Version (Original)</a> | <a href="https://github.com/ccxyChuzhong/CloudFlare-ImgBed-D1">D1 Version</a> | <a href="https://cfbed.sanyue.de">Official Website</a>
</p>
<div>
<a href="https://github.com/MarSeventh/CloudFlare-ImgBed/blob/main/LICENSE">
@@ -78,6 +78,144 @@
</details>


# Important! Important! Important!
If you are currently using KV storage and want to switch to D1 storage, it is recommended to create a new image hosting deployment and migrate your data with the system's backup and restore features!

<details>
<summary>Detailed steps for migrating from KV to D1 storage</summary>

- First, confirm that your D1 database has been created. The database name must be `imgbed-database`. Then execute the SQL statements below, section by section, until the whole script has run:
```sql
-- CloudFlare ImgBed D1 Database Initialization Script
-- This script initializes the D1 database

-- Drop existing tables (only if re-initialization is needed)
-- Note: use with caution in a production environment
-- DROP TABLE IF EXISTS files;
-- DROP TABLE IF EXISTS settings;
-- DROP TABLE IF EXISTS index_operations;
-- DROP TABLE IF EXISTS index_metadata;
-- DROP TABLE IF EXISTS other_data;

-- Create the main database schema
-- This includes the content of schema.sql

-- 1. Files table - stores file metadata
CREATE TABLE IF NOT EXISTS files (
    id TEXT PRIMARY KEY,
    value TEXT,
    metadata TEXT NOT NULL,
    file_name TEXT,
    file_type TEXT,
    file_size TEXT,
    upload_ip TEXT,
    upload_address TEXT,
    list_type TEXT,
    timestamp INTEGER,
    label TEXT,
    directory TEXT,
    channel TEXT,
    channel_name TEXT,
    tg_file_id TEXT,
    tg_chat_id TEXT,
    tg_bot_token TEXT,
    is_chunked BOOLEAN DEFAULT FALSE,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 2. System configuration table
CREATE TABLE IF NOT EXISTS settings (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    category TEXT,
    description TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 3. Index operations table
CREATE TABLE IF NOT EXISTS index_operations (
    id TEXT PRIMARY KEY,
    type TEXT NOT NULL,
    timestamp INTEGER NOT NULL,
    data TEXT NOT NULL,
    processed BOOLEAN DEFAULT FALSE,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 4. Index metadata table
CREATE TABLE IF NOT EXISTS index_metadata (
    key TEXT PRIMARY KEY,
    last_updated INTEGER,
    total_count INTEGER DEFAULT 0,
    last_operation_id TEXT,
    chunk_count INTEGER DEFAULT 0,
    chunk_size INTEGER DEFAULT 0,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 5. Other data table
CREATE TABLE IF NOT EXISTS other_data (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    type TEXT,
    description TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- Insert the initial index metadata
INSERT OR REPLACE INTO index_metadata (key, last_updated, total_count, last_operation_id)
VALUES ('main_index', 0, 0, NULL);

-- Initialization complete
-- The database is ready; data migration can begin
```

### Configure the Pages binding in the Cloudflare Dashboard

#### Step A: Log in to the Cloudflare Dashboard
1. Visit https://dash.cloudflare.com
2. Log in to your account

#### Step B: Open the Pages project
1. Click **"Pages"** in the left menu
2. Find and click your image hosting project

#### Step C: Configure the Functions binding
1. On the project page, click the **"Settings"** tab
2. Click **"Functions"** in the left menu
3. Scroll down to the **"D1 database bindings"** section

#### Step D: Add the D1 binding
1. Click the **"Add binding"** button
2. Fill in the following information:
   - **Variable name**: `DB` (must be uppercase DB)
   - **D1 database**: Select the `imgbed-database` you created from the dropdown
3. Click the **"Save"** button (a sketch of how the binding is used from code follows below)
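
Once the binding is saved, Pages Functions can reach the database through `context.env.DB`. The following is a minimal sketch of a temporary verification Function (not part of this project; the path is hypothetical), assuming the binding name `DB` from Step D; `sqlite_master` is the standard SQLite catalog table that D1 exposes.

```js
// functions/api/debug/tables.js - hypothetical path, for one-off verification only.
// Assumes the D1 binding from Step D is named DB.
export async function onRequest(context) {
    const db = context.env.DB;

    // D1's prepare().all() returns { results: [...] } for a SELECT.
    const { results } = await db
        .prepare("SELECT name FROM sqlite_master WHERE type = 'table' ORDER BY name")
        .all();

    // Expect files, settings, index_operations, index_metadata and other_data here.
    return new Response(JSON.stringify(results, null, 2), {
        headers: { 'Content-Type': 'application/json' },
    });
}
```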

#### Step E: Redeploy the Pages project

After configuring the binding, you need to redeploy.

#### Step F: Verify the configuration

After the deployment finishes, visit the following URL to verify the configuration:

```
https://your-domain.com/api/manage/migrate?action=check
```

To view the detailed configuration status:

```
https://your-domain.com/api/manage/migrate?action=status
```
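
Both endpoints can also be checked from a small script. A minimal sketch assuming Node 18+ (for the global `fetch`) and your own domain in place of `https://your-domain.com`; the exact JSON fields returned are not documented here, so the script just dumps the response:

```js
// check-migration.mjs - hypothetical helper; run with: node check-migration.mjs
const BASE = 'https://your-domain.com'; // replace with your deployment's domain

for (const action of ['check', 'status']) {
    const res = await fetch(`${BASE}/api/manage/migrate?action=${action}`);
    console.log(`action=${action} -> HTTP ${res.status}`);
    console.log(await res.json()); // dump whatever the endpoint reports
}
```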
</details>


# 1. Introduction
README_en.md (144 lines changed)
@@ -1,9 +1,8 @@
<div align="center">
<a href="https://github.com/MarSeventh/CloudFlare-ImgBed"><img width="80%" alt="logo" src="static/readme/banner.png"/></a>
- <p><em>🗂️Open-source file hosting solution, supporting Docker and serverless deployment, supporting multiple storage channels such as Telegram Bot, Cloudflare R2, S3, etc.</em></p>
+ <p><em>🗂️Open-source file hosting solution, supporting Docker and serverless deployment, supporting multiple storage channels such as Telegram Bot, Cloudflare R2, S3, etc.</em> Modified version that replaces KV with D1 storage</p>
<p>
- <a href="https://github.com/MarSeventh/CloudFlare-ImgBed/blob/main/README.md">简体中文</a> | <a href="https://github.com/MarSeventh/CloudFlare-ImgBed/blob/main/README_en.md">English</a> | <a
- href="https://cfbed.sanyue.de/en">Official Website</a>
+ <a href="https://github.com/ccxyChuzhong/CloudFlare-ImgBed-D1/blob/main/README.md">简体中文</a> | <a href="https://github.com/ccxyChuzhong/CloudFlare-ImgBed-D1/blob/main/README_en.md">English</a> | <a href="https://github.com/MarSeventh/CloudFlare-ImgBed">KV Version (Original)</a> | <a href="https://github.com/ccxyChuzhong/CloudFlare-ImgBed-D1">D1 Version</a> | <a href="https://cfbed.sanyue.de/en">Official Website</a>
</p>
<div>
<a href="https://github.com/MarSeventh/CloudFlare-ImgBed/blob/main/LICENSE">
@@ -68,6 +67,145 @@
</details>


# Important! Important! Important!
If you are using KV storage and want to migrate to D1 storage, it is recommended to create a new image hosting deployment and migrate the data with the system's backup and restore functions!

<details>
<summary>Detailed KV to D1 Storage Migration Guide</summary>

- First, confirm that your D1 database has been created. The database name must be `imgbed-database`. Execute all of the SQL statements below, section by section:
```sql
-- CloudFlare ImgBed D1 Database Initialization Script
-- This script is used to initialize the D1 database

-- Drop existing tables (only if re-initialization is needed)
-- Note: use with caution in a production environment
-- DROP TABLE IF EXISTS files;
-- DROP TABLE IF EXISTS settings;
-- DROP TABLE IF EXISTS index_operations;
-- DROP TABLE IF EXISTS index_metadata;
-- DROP TABLE IF EXISTS other_data;

-- Create the main database schema
-- This includes the content of schema.sql

-- 1. Files table - stores file metadata
CREATE TABLE IF NOT EXISTS files (
    id TEXT PRIMARY KEY,
    value TEXT,
    metadata TEXT NOT NULL,
    file_name TEXT,
    file_type TEXT,
    file_size TEXT,
    upload_ip TEXT,
    upload_address TEXT,
    list_type TEXT,
    timestamp INTEGER,
    label TEXT,
    directory TEXT,
    channel TEXT,
    channel_name TEXT,
    tg_file_id TEXT,
    tg_chat_id TEXT,
    tg_bot_token TEXT,
    is_chunked BOOLEAN DEFAULT FALSE,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 2. System configuration table
CREATE TABLE IF NOT EXISTS settings (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    category TEXT,
    description TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 3. Index operations table
CREATE TABLE IF NOT EXISTS index_operations (
    id TEXT PRIMARY KEY,
    type TEXT NOT NULL,
    timestamp INTEGER NOT NULL,
    data TEXT NOT NULL,
    processed BOOLEAN DEFAULT FALSE,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 4. Index metadata table
CREATE TABLE IF NOT EXISTS index_metadata (
    key TEXT PRIMARY KEY,
    last_updated INTEGER,
    total_count INTEGER DEFAULT 0,
    last_operation_id TEXT,
    chunk_count INTEGER DEFAULT 0,
    chunk_size INTEGER DEFAULT 0,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- 5. Other data table
CREATE TABLE IF NOT EXISTS other_data (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    type TEXT,
    description TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- Insert initial index metadata
INSERT OR REPLACE INTO index_metadata (key, last_updated, total_count, last_operation_id)
VALUES ('main_index', 0, 0, NULL);

-- Initialization complete
-- The database is ready; data migration can begin
```

### Configure Pages Bindings in the Cloudflare Dashboard

#### Step A: Log in to the Cloudflare Dashboard
1. Visit https://dash.cloudflare.com
2. Log in to your account

#### Step B: Open the Pages Project
1. Click **"Pages"** in the left menu
2. Find and click your image hosting project

#### Step C: Configure Functions Bindings
1. Click the **"Settings"** tab on the project page
2. Click **"Functions"** in the left menu
3. Scroll down to find the **"D1 database bindings"** section

#### Step D: Add the D1 Binding
1. Click the **"Add binding"** button
2. Fill in the following information:
   - **Variable name**: `DB` (must be uppercase DB)
   - **D1 database**: Select the `imgbed-database` you created from the dropdown
3. Click the **"Save"** button (see the sketch below for how the binding is accessed from code)
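
With the binding saved, the database is visible to Pages Functions as `env.DB`. As a quick smoke test, a temporary Function along these lines (a sketch, not part of the project; the path is made up) can read back the `main_index` row seeded by the initialization script:

```js
// functions/api/debug/index-meta.js - hypothetical verification endpoint.
export async function onRequest(context) {
    // DB is the binding name configured in Step D.
    const row = await context.env.DB
        .prepare("SELECT key, total_count, last_updated FROM index_metadata WHERE key = 'main_index'")
        .first();

    // A freshly initialized database should report total_count = 0 and last_updated = 0.
    return Response.json(row ?? { error: 'main_index row not found' });
}
```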

#### Step E: Redeploy Pages

After configuring the binding, you need to redeploy.

#### Step F: Verify the Configuration

After the deployment is complete, visit the following URL to verify the configuration:

```
https://your-domain.com/api/manage/migrate?action=check
```

To view the detailed configuration status:

```
https://your-domain.com/api/manage/migrate?action=status
```
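
The same check can be scripted; a minimal sketch, again assuming Node 18+ and your own domain in place of `https://your-domain.com`:

```js
// verify-migration.mjs - hypothetical helper; run with: node verify-migration.mjs
const base = 'https://your-domain.com'; // replace with your deployment's domain

const check = await fetch(`${base}/api/manage/migrate?action=check`);
const status = await fetch(`${base}/api/manage/migrate?action=status`);

console.log('check  ->', check.status, await check.json());
console.log('status ->', status.status, await status.json());
```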
</details>


# 1. Introduction
@@ -447,12 +447,46 @@ export async function readIndex(context, options = {}) {
  // Directories follow the "no leading slash, trailing slash" format; the root directory is an empty string
  const dirPrefix = directory === '' || directory.endsWith('/') ? directory : directory + '/';

- // Process pending operations
- await mergeOperationsToIndex(context);
+ // Read files directly from the database instead of relying on the index
+ const { env } = context;
+ const db = getDatabase(env);

- // Get the current index
- const index = await getIndex(context);
- let filteredFiles = index.files;
+ let allFiles = [];
+ let cursor = null;

+ // Fetch all files in batches
+ while (true) {
+     const response = await db.listFiles({
+         limit: 1000,
+         cursor: cursor
+     });
+
+     if (!response || !response.keys || !Array.isArray(response.keys)) {
+         break;
+     }
+
+     for (const item of response.keys) {
+         // Skip management-related keys and chunk data
+         if (item.name.startsWith('manage@') || item.name.startsWith('chunk_')) {
+             continue;
+         }
+
+         // Skip files without metadata
+         if (!item.metadata || !item.metadata.TimeStamp) {
+             continue;
+         }
+
+         allFiles.push({
+             id: item.name,
+             metadata: item.metadata
+         });
+     }
+
+     cursor = response.cursor;
+     if (!cursor) break;
+ }
+
+ let filteredFiles = allFiles;

  // Directory filtering
  if (directory) {
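
The same cursor loop over `db.listFiles` reappears in `getIndexInfo` below. For reference, the pattern can be factored into a small helper; this is only a sketch that assumes the `db.listFiles({ limit, cursor })` call and the `{ keys, cursor }` response shape shown in this diff, plus the `manage@`/`chunk_` key conventions it filters out:

```js
// Sketch: collect every user file from the D1-backed store, following cursors.
// Assumes a db object compatible with the listFiles() shim used in this commit.
async function listAllFiles(db) {
    const allFiles = [];
    let cursor = null;

    while (true) {
        const response = await db.listFiles({ limit: 1000, cursor });
        if (!response || !Array.isArray(response.keys)) break;

        for (const item of response.keys) {
            // Skip management keys, chunk data and entries without usable metadata.
            if (item.name.startsWith('manage@') || item.name.startsWith('chunk_')) continue;
            if (!item.metadata || !item.metadata.TimeStamp) continue;

            allFiles.push({ id: item.name, metadata: item.metadata });
        }

        cursor = response.cursor;
        if (!cursor) break;
    }

    return allFiles;
}
```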
@@ -531,9 +565,9 @@ export async function readIndex(context, options = {}) {
  files: resultFiles,
  directories: Array.from(directories),
  totalCount: totalCount,
- indexLastUpdated: index.lastUpdated,
+ indexLastUpdated: Date.now(),
  returnedCount: resultFiles.length,
- success: index.success ?? true
+ success: true
  };

  } catch (error) {
@@ -643,15 +677,57 @@ export async function rebuildIndex(context, progressCallback = null) {
  */
  export async function getIndexInfo(context) {
  try {
- const index = await getIndex(context);
+ // Read file information directly from the database instead of relying on the index
+ const { env } = context;
+ const db = getDatabase(env);

- // Check whether the index was retrieved successfully
- if (index.success === false) {
-     return {
-         success: false,
-         error: 'Failed to retrieve index',
-         message: 'Index is not available or corrupted'
+ let allFiles = [];
+ let cursor = null;

+ // Fetch all files in batches
+ while (true) {
+     const response = await db.listFiles({
+         limit: 1000,
+         cursor: cursor
+     });
+
+     if (!response || !response.keys || !Array.isArray(response.keys)) {
+         break;
+     }
+
+     for (const item of response.keys) {
+         // Skip management-related keys and chunk data
+         if (item.name.startsWith('manage@') || item.name.startsWith('chunk_')) {
+             continue;
+         }
+
+         // Skip files without metadata
+         if (!item.metadata || !item.metadata.TimeStamp) {
+             continue;
+         }
+
+         allFiles.push({
+             id: item.name,
+             metadata: item.metadata
+         });
+     }
+
+     cursor = response.cursor;
+     if (!cursor) break;
+ }
+
+ // If there are no files, return an empty result
+ if (allFiles.length === 0) {
+     return {
+         success: true,
+         totalFiles: 0,
+         lastUpdated: Date.now(),
+         channelStats: {},
+         directoryStats: {},
+         typeStats: {},
+         oldestFile: null,
+         newestFile: null
  };
  }

  // Count files per channel
@@ -659,7 +735,7 @@ export async function getIndexInfo(context) {
  const directoryStats = {};
  const typeStats = {};

- index.files.forEach(file => {
+ allFiles.forEach(file => {
  // Channel statistics
  let channel = file.metadata.Channel || 'Telegraph';
  if (channel === 'TelegramNew') {
@@ -680,15 +756,31 @@ export async function getIndexInfo(context) {
  typeStats[listType] = (typeStats[listType] || 0) + 1;
  });

+ // Find the oldest and newest files
+ let oldestFile = null;
+ let newestFile = null;
+
+ if (allFiles.length > 0) {
+     // Sort by timestamp
+     const sortedFiles = [...allFiles].sort((a, b) => {
+         const timeA = a.metadata.TimeStamp || 0;
+         const timeB = b.metadata.TimeStamp || 0;
+         return timeA - timeB;
+     });
+
+     oldestFile = sortedFiles[0];
+     newestFile = sortedFiles[sortedFiles.length - 1];
+ }

  return {
  success: true,
- totalFiles: index.totalCount,
- lastUpdated: index.lastUpdated,
+ totalFiles: allFiles.length,
+ lastUpdated: Date.now(),
  channelStats,
  directoryStats,
  typeStats,
- oldestFile: index.files[index.files.length - 1],
- newestFile: index.files[0]
+ oldestFile,
+ newestFile
  };
  } catch (error) {
  console.error('Error getting index info:', error);
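
With these changes, callers of `getIndexInfo` receive statistics computed straight from the database listing. A usage sketch, based only on the fields visible in this diff (the route that actually invokes it, and the module import, are not shown here):

```js
// Sketch: a caller of getIndexInfo(context); field names are taken from the return object above.
async function logIndexStats(context) {
    const info = await getIndexInfo(context);
    if (!info.success) return;

    console.log(`Total files: ${info.totalFiles}`);
    console.log('Per-channel counts:', info.channelStats);
    console.log('Newest file id:', info.newestFile ? info.newestFile.id : null);
}
```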