docker cluster deployment

This commit is contained in:
王健辉 2021-12-20 16:03:03 +08:00
parent 0c52c93139
commit 32d12a8d82
5 changed files with 2207 additions and 0 deletions

View File

@ -0,0 +1,104 @@
-- ccnet schema: maps a user account (email) to a client peer ID.
-- NOTE(review): peer_id is CHAR(41) — presumably a 40-char hex ID plus one
-- byte of slack; confirm against the Seafile server source.
CREATE TABLE IF NOT EXISTS Binding (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
email VARCHAR(255),
peer_id CHAR(41),
UNIQUE INDEX (peer_id),
-- prefix index: only the first 20 characters of email are indexed
INDEX (email(20))
) ENGINE=INNODB;
-- Local (database-backed) user accounts.
-- passwd is presumably a password hash, not plaintext — confirm upstream.
-- reference_id allows an external identifier to map onto this account.
CREATE TABLE IF NOT EXISTS EmailUser (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
email VARCHAR(255),
passwd VARCHAR(256),
is_staff BOOL NOT NULL,
is_active BOOL NOT NULL,
-- ctime: creation time as a raw integer; unit (seconds vs microseconds)
-- is not visible here — TODO confirm
ctime BIGINT,
reference_id VARCHAR(255),
UNIQUE INDEX (email),
UNIQUE INDEX (reference_id)
) ENGINE=INNODB;
-- User groups. Backticks are required: GROUP is a reserved word in MySQL.
CREATE TABLE IF NOT EXISTS `Group` (
`group_id` BIGINT PRIMARY KEY AUTO_INCREMENT,
`group_name` VARCHAR(255),
`creator_name` VARCHAR(255),
`timestamp` BIGINT,
`type` VARCHAR(32),
-- NOTE(review): parent_group_id is INTEGER while group_id is BIGINT —
-- a width mismatch inherited from upstream; confirm before changing.
`parent_group_id` INTEGER
) ENGINE=INNODB;
-- Maps a group to an LDAP distinguished name (dn) — presumably for
-- LDAP-synced groups; verify against the sync code.
CREATE TABLE IF NOT EXISTS GroupDNPair (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
group_id INTEGER,
dn VARCHAR(255)
)ENGINE=INNODB;
-- One row per group holding its materialized hierarchy path (one path
-- per group, enforced by the unique index).
CREATE TABLE IF NOT EXISTS GroupStructure (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
group_id INTEGER,
path VARCHAR(1024),
UNIQUE INDEX(group_id)
)ENGINE=INNODB;
-- Group membership: one row per (group, user) pair; is_staff presumably
-- marks group admins — confirm against the application code.
CREATE TABLE IF NOT EXISTS `GroupUser` (
`id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
`group_id` BIGINT,
`user_name` VARCHAR(255),
`is_staff` tinyint,
-- a user can belong to a group at most once
UNIQUE INDEX (`group_id`, `user_name`),
-- supports "list groups for user" lookups
INDEX (`user_name`)
) ENGINE=INNODB;
-- Generic key/value configuration store for the LDAP component,
-- namespaced by cfg_group.
CREATE TABLE IF NOT EXISTS LDAPConfig (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
cfg_group VARCHAR(255) NOT NULL,
cfg_key VARCHAR(255) NOT NULL,
value VARCHAR(255),
property INTEGER
) ENGINE=INNODB;
-- Accounts imported/synced from LDAP; mirrors EmailUser's shape with an
-- extra_attrs blob for additional LDAP attributes.
CREATE TABLE IF NOT EXISTS LDAPUsers (
id BIGINT PRIMARY KEY AUTO_INCREMENT,
email VARCHAR(255) NOT NULL,
password varchar(255) NOT NULL,
is_staff BOOL NOT NULL,
is_active BOOL NOT NULL,
extra_attrs TEXT,
reference_id VARCHAR(255),
UNIQUE INDEX(email),
UNIQUE INDEX (reference_id)
) ENGINE=INNODB;
-- Associates groups with an organization (multi-tenant mode).
CREATE TABLE IF NOT EXISTS OrgGroup (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INTEGER,
group_id INTEGER,
INDEX (group_id),
-- a group appears in an org at most once
UNIQUE INDEX(org_id, group_id)
) ENGINE=INNODB;
-- Associates users with an organization; is_staff presumably marks org
-- administrators — confirm against the application code.
CREATE TABLE IF NOT EXISTS OrgUser (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INTEGER,
email VARCHAR(255),
is_staff BOOL NOT NULL,
INDEX (email),
UNIQUE INDEX(org_id, email)
) ENGINE=INNODB;
-- Organizations (tenants); url_prefix must be globally unique since it
-- presumably addresses the org in URLs.
CREATE TABLE IF NOT EXISTS Organization (
org_id BIGINT PRIMARY KEY AUTO_INCREMENT,
org_name VARCHAR(255),
url_prefix VARCHAR(255),
creator VARCHAR(255),
ctime BIGINT,
UNIQUE INDEX (url_prefix)
) ENGINE=INNODB;
-- One role per user; is_manual_set distinguishes an admin-assigned role
-- (non-zero) from a default/derived one — presumably; confirm upstream.
CREATE TABLE IF NOT EXISTS UserRole (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
email VARCHAR(255),
role VARCHAR(255),
is_manual_set INTEGER DEFAULT 0,
UNIQUE INDEX (email)
) ENGINE=INNODB;

View File

@ -0,0 +1,324 @@
-- seafile schema: branch heads per repo (name -> commit_id).
-- NOTE(review): repo_id widths vary across this schema (36/37/40/41);
-- this is inherited from upstream Seafile — do not "normalize" without
-- checking the server source.
CREATE TABLE IF NOT EXISTS Branch (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-- VARCHAR(10) is tight; presumably only fixed names like 'master' are
-- stored — confirm upstream
name VARCHAR(10),
repo_id CHAR(41),
commit_id CHAR(41),
UNIQUE INDEX(repo_id, name)
) ENGINE = INNODB;
-- Per-repo timestamp of the most recent file-lock change (one row per
-- repo, enforced by the unique index).
-- Fix: declare ENGINE=INNODB explicitly, matching every other table in
-- this schema, instead of silently relying on the server's default
-- storage engine.
CREATE TABLE IF NOT EXISTS FileLockTimestamp (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(40),
update_time BIGINT NOT NULL,
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Active file locks: who locked which path in which repo, with lock and
-- expiry times (raw integer timestamps; unit not visible here).
CREATE TABLE IF NOT EXISTS FileLocks (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(40) NOT NULL,
path TEXT NOT NULL,
user_name VARCHAR(255) NOT NULL,
lock_time BIGINT,
expire BIGINT,
-- KEY is MySQL's synonym for INDEX
KEY(repo_id)
) ENGINE=INNODB;
-- Folder-level permission granted to a group on a path within a repo.
CREATE TABLE IF NOT EXISTS FolderGroupPerm (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36) NOT NULL,
path TEXT NOT NULL,
permission CHAR(15),
group_id INTEGER NOT NULL,
INDEX(repo_id)
) ENGINE=INNODB;
-- Per-repo timestamp of the last folder-permission change.
CREATE TABLE IF NOT EXISTS FolderPermTimestamp (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
timestamp BIGINT,
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Folder-level permission granted to an individual user on a path.
CREATE TABLE IF NOT EXISTS FolderUserPerm (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36) NOT NULL,
path TEXT NOT NULL,
permission CHAR(15),
user VARCHAR(255) NOT NULL,
INDEX(repo_id)
) ENGINE=INNODB;
-- Latest garbage-collection run ID per repo.
CREATE TABLE IF NOT EXISTS GCID (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
gc_id CHAR(36),
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Repos queued for deletion/garbage collection (one row per repo).
-- Fix: declare ENGINE=INNODB explicitly, matching every other table in
-- this schema, instead of silently relying on the server's default
-- storage engine.
CREATE TABLE IF NOT EXISTS GarbageRepos (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Repos published to all users on the server ("inner public"), with the
-- permission granted to everyone.
CREATE TABLE IF NOT EXISTS InnerPubRepo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
permission CHAR(15),
UNIQUE INDEX (repo_id)
) ENGINE=INNODB;
-- Last GC run ID seen per (repo, client) — presumably lets a syncing
-- client detect that GC ran since its last sync; confirm upstream.
CREATE TABLE IF NOT EXISTS LastGCID (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
client_id VARCHAR(128),
gc_id CHAR(36),
UNIQUE INDEX(repo_id, client_id)
) ENGINE=INNODB;
-- Org-scoped variant of group repo sharing.
CREATE TABLE IF NOT EXISTS OrgGroupRepo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INTEGER,
repo_id CHAR(37),
group_id INTEGER,
owner VARCHAR(255),
permission CHAR(15),
UNIQUE INDEX(org_id, group_id, repo_id),
INDEX (repo_id), INDEX (owner)
) ENGINE=INNODB;
-- Org-scoped variant of InnerPubRepo.
CREATE TABLE IF NOT EXISTS OrgInnerPubRepo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INTEGER,
repo_id CHAR(37),
UNIQUE INDEX(org_id, repo_id),
permission CHAR(15)
) ENGINE=INNODB;
-- Storage quota per organization (unit not visible here; presumably bytes).
CREATE TABLE IF NOT EXISTS OrgQuota (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INTEGER,
quota BIGINT,
UNIQUE INDEX(org_id)
) ENGINE=INNODB;
-- Repo ownership within an organization: which user in which org owns a
-- repo. repo_id alone is unique, so a repo belongs to exactly one org/user.
CREATE TABLE IF NOT EXISTS OrgRepo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INTEGER,
repo_id CHAR(37),
user VARCHAR(255),
UNIQUE INDEX(org_id, repo_id),
UNIQUE INDEX (repo_id),
INDEX (org_id, user),
INDEX(user)
) ENGINE=INNODB;
-- Org-scoped direct user-to-user share of a repo.
-- NOTE(review): id here is INTEGER while sibling tables use BIGINT —
-- inherited from upstream; confirm before changing.
CREATE TABLE IF NOT EXISTS OrgSharedRepo (
id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INT,
repo_id CHAR(37) ,
from_email VARCHAR(255),
to_email VARCHAR(255),
permission CHAR(15),
INDEX(repo_id),
INDEX (org_id, repo_id),
INDEX(from_email), INDEX(to_email)
) ENGINE=INNODB;
-- Per-user storage quota within an organization.
CREATE TABLE IF NOT EXISTS OrgUserQuota (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INTEGER,
user VARCHAR(255),
quota BIGINT,
UNIQUE INDEX(org_id, user)
) ENGINE=INNODB;
-- Registry of all repository IDs known to the server.
CREATE TABLE IF NOT EXISTS Repo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
UNIQUE INDEX (repo_id)
) ENGINE=INNODB;
-- Cached file count per repo.
CREATE TABLE IF NOT EXISTS RepoFileCount (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
file_count BIGINT UNSIGNED,
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Repo shared to a group; user_name is presumably the sharer — confirm.
CREATE TABLE IF NOT EXISTS RepoGroup (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
group_id INTEGER,
user_name VARCHAR(255),
permission CHAR(15),
UNIQUE INDEX(group_id, repo_id),
INDEX (repo_id), INDEX (user_name)
) ENGINE=INNODB;
-- Current head branch per repo.
CREATE TABLE IF NOT EXISTS RepoHead (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
branch_name VARCHAR(10),
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- How many days of history to keep per repo.
CREATE TABLE IF NOT EXISTS RepoHistoryLimit (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
days INTEGER,
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Denormalized repo metadata (name, last update, encryption flag, status)
-- for fast listing without walking commits.
CREATE TABLE IF NOT EXISTS RepoInfo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
name VARCHAR(255) NOT NULL,
update_time BIGINT,
version INTEGER,
is_encrypted INTEGER,
last_modifier VARCHAR(255),
status INTEGER DEFAULT 0,
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Owner of each (non-org) repo.
CREATE TABLE IF NOT EXISTS RepoOwner (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
owner_id VARCHAR(255),
UNIQUE INDEX (repo_id),
INDEX (owner_id)
) ENGINE=INNODB;
-- Cached repo size plus the head commit it was computed at.
CREATE TABLE IF NOT EXISTS RepoSize (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
size BIGINT UNSIGNED,
head_id CHAR(41),
UNIQUE INDEX (repo_id)
) ENGINE=INNODB;
-- Which storage backend holds a repo's objects (multi-backend setups).
CREATE TABLE IF NOT EXISTS RepoStorageId (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(40) NOT NULL,
storage_id VARCHAR(255) NOT NULL,
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Last sync error reported for a sync token.
CREATE TABLE IF NOT EXISTS RepoSyncError (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
token CHAR(41),
error_time BIGINT UNSIGNED,
error_con VARCHAR(1024),
UNIQUE INDEX(token)
) ENGINE=INNODB;
-- Client/device details associated with a sync token (IP, name, client
-- version, last sync time).
CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
token CHAR(41),
peer_id CHAR(41),
peer_ip VARCHAR(41),
peer_name VARCHAR(255),
sync_time BIGINT,
client_ver VARCHAR(20),
UNIQUE INDEX(token)
) ENGINE=INNODB;
-- Deleted repos kept in the trash until purged; org_id ties trashed
-- org repos back to their organization.
CREATE TABLE IF NOT EXISTS RepoTrash (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
repo_name VARCHAR(255),
head_id CHAR(40),
owner_id VARCHAR(255),
size BIGINT(20),
org_id INTEGER,
del_time BIGINT,
UNIQUE INDEX(repo_id),
INDEX(owner_id),
INDEX(org_id)
) ENGINE=INNODB;
-- Per-user sync tokens for a repo; a token is unique within a repo.
CREATE TABLE IF NOT EXISTS RepoUserToken (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
email VARCHAR(255),
token CHAR(41),
UNIQUE INDEX(repo_id, token),
INDEX (email)
) ENGINE=INNODB;
-- Earliest timestamp from which a repo's history is valid — presumably
-- advanced when history is truncated; confirm upstream.
CREATE TABLE IF NOT EXISTS RepoValidSince (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
timestamp BIGINT,
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Default storage quota per user role.
CREATE TABLE IF NOT EXISTS RoleQuota (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
role VARCHAR(255),
quota BIGINT,
UNIQUE INDEX(role)
) ENGINE=INNODB;
-- Generic key/value configuration store for the seafile server,
-- namespaced by cfg_group (same shape as LDAPConfig in the ccnet schema).
CREATE TABLE IF NOT EXISTS SeafileConf (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
cfg_group VARCHAR(255) NOT NULL,
cfg_key VARCHAR(255) NOT NULL,
value VARCHAR(255),
property INTEGER
) ENGINE=INNODB;
-- Direct user-to-user share of a repo (non-org variant of OrgSharedRepo).
CREATE TABLE IF NOT EXISTS SharedRepo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37) ,
from_email VARCHAR(255),
to_email VARCHAR(255),
permission CHAR(15),
INDEX (repo_id),
INDEX(from_email),
INDEX(to_email)
) ENGINE=INNODB;
-- Server-level key/value metadata store.
-- Fix: declare ENGINE=INNODB explicitly, matching every other table in
-- this schema, instead of silently relying on the server's default
-- storage engine.
CREATE TABLE IF NOT EXISTS SystemInfo (
id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
info_key VARCHAR(256),
info_value VARCHAR(1024)
) ENGINE=INNODB;
-- Per-user storage quota override.
CREATE TABLE IF NOT EXISTS UserQuota (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
user VARCHAR(255),
quota BIGINT,
UNIQUE INDEX(user)
) ENGINE=INNODB;
-- Per-user quota for shared storage — presumably limits space consumed
-- by shares; confirm upstream.
CREATE TABLE IF NOT EXISTS UserShareQuota (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
user VARCHAR(255),
quota BIGINT,
UNIQUE INDEX(user)
) ENGINE=INNODB;
-- Virtual repos: a sub-folder of origin_repo exposed as its own repo,
-- rooted at path, based on base_commit.
CREATE TABLE IF NOT EXISTS VirtualRepo (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(36),
origin_repo CHAR(36),
path TEXT,
base_commit CHAR(40),
UNIQUE INDEX(repo_id),
INDEX(origin_repo)
) ENGINE=INNODB;
-- Web access property per repo (meaning of access_property values is not
-- visible here — TODO confirm).
CREATE TABLE IF NOT EXISTS WebAP (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(37),
access_property CHAR(10),
UNIQUE INDEX(repo_id)
) ENGINE=INNODB;
-- Temporary files created by web uploads, so interrupted uploads can be
-- resumed or cleaned up.
CREATE TABLE IF NOT EXISTS WebUploadTempFiles (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
repo_id CHAR(40) NOT NULL,
file_path TEXT NOT NULL,
tmp_file_path TEXT NOT NULL
) ENGINE=INNODB;

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,542 @@
# Seafile Docker Cluster Deployment
## Environment
System: Ubuntu 20.04
docker-compose: 1.25.0
Seafile Server: 2 frontend nodes, 1 backend node
Mariadb, Memcached, Elasticsearch are all deployed on the backend node.
## Deploy Mariadb, Memcached and Elasticsearch
Install docker-compose on the backend node
```
$ apt update && apt install docker-compose -y
```
### MariaDB
Create the mount directory
```
$ mkdir -p /opt/seafile-mysql/mysql-data
```
Create the docker-compose.yml file
```
$ cd /opt/seafile-mysql
$ vim docker-compose.yml
```
```
version: '2.0'
services:
db:
image: mariadb:10.5
container_name: seafile-mysql
ports:
- 172.26.6.23:3306:3306 # Change '172.26.6.23' to the IP of your backend node
volumes:
- /opt/seafile-mysql/mysql-data:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=PASSWORD # Set your MySQL root user's password
- MYSQL_LOG_CONSOLE=true
```
Start MariaDB
```
$ cd /opt/seafile-mysql
$ docker-compose up -d
```
Create the three databases ccnet_db, seafile_db, and seahub_db required by Seafile on MariaDB, and authorize the \`seafile\` user to be able to access these three databases:
```
$ mysql -h{your backend node IP} -uroot -pPASSWORD
mysql>
create user 'seafile'@'%' identified by 'PASSWORD';
create database `ccnet_db` character set = 'utf8';
create database `seafile_db` character set = 'utf8';
create database `seahub_db` character set = 'utf8';
GRANT ALL PRIVILEGES ON `ccnet_db`.* to 'seafile'@'%';
GRANT ALL PRIVILEGES ON `seafile_db`.* to 'seafile'@'%';
GRANT ALL PRIVILEGES ON `seahub_db`.* to 'seafile'@'%';
```
You also need to create a table in \`seahub_db\`
```none
mysql>
use seahub_db;
CREATE TABLE `avatar_uploaded` (
`filename` text NOT NULL,
`filename_md5` char(32) NOT NULL,
`data` mediumtext NOT NULL,
`size` int(11) NOT NULL,
`mtime` datetime NOT NULL,
PRIMARY KEY (`filename_md5`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```
After the databases are created, the tables needed by seafile need to be imported into the database: [ccnet_db](SQL/ccnet.sql), [seafile_db](SQL/seafile.sql), [seahub_db](SQL/seahub.sql).
### Memcached
Create the mount directory
```
$ mkdir -p /opt/seafile-memcached
```
Create the docker-compose.yml file
```
$ cd /opt/seafile-memcached
$ vim docker-compose.yml
```
```
version: '2.0'
services:
memcached:
image: memcached:1.5.6
container_name: seafile-memcached
entrypoint: memcached -m 256
ports:
- 172.26.6.23:11211:11211 # Change '172.26.6.23' to the IP of your backend node
```
Start memcached
```
$ cd /opt/seafile-memcached
$ docker-compose up -d
```
Test whether you can connect to memcached
```
$ telnet {your backend node IP} 11211
```
### Elasticsearch
Create the mount directory
```
$ mkdir -p /opt/seafile-elasticsearch/data
$ chmod -R 777 /opt/seafile-elasticsearch/data
```
Create the docker-compose.yml file
```
$ cd /opt/seafile-elasticsearch
$ vim docker-compose.yml
```
```
version: '2.0'
services:
elasticsearch:
image: elasticsearch:6.8.20
container_name: seafile-elasticsearch
ports:
- 172.26.6.23:9200:9200 # Change '172.26.6.23' to the IP of your backend node
volumes:
- /opt/seafile-elasticsearch/data:/usr/share/elasticsearch/data
environment:
- discovery.type=single-node
- bootstrap.memory_lock=true
- ES_JAVA_OPTS=-Xms1g -Xmx1g
```
**Among them, ES_JAVA_OPTS=-Xms1g -Xmx1g is to set the memory used by elasticsearch, which is set according to the memory size of your server.**
**Note:** The Elasticsearch version must match your Seafile version: Seafile Pro 9.0.x requires Elasticsearch 6.8.x.
Start Elasticsearch
```
$ cd /opt/seafile-elasticsearch
$ docker-compose up -d
```
Test if elasticsearch is started normally
```
$ curl http://{your backend node IP}:9200/_cluster/health?pretty
```
## Deploy Seafile service
### Deploy seafile frontend nodes
Install docker-compose on each frontend node
```
$ apt update && apt install docker-compose -y
```
Create the mount directory
```
$ mkdir -p /opt/seafile/shared
```
Create the docker-compose.yml file
```
$ cd /opt/seafile
$ vim docker-compose.yml
```
```
version: '2.0'
services:
seafile:
image: docker.seafile.top/seafileltd/seafile-pro-mc:9.0.2
container_name: seafile
ports:
- 80:80
volumes:
- /opt/seafile/shared:/shared
environment:
- CLUSTER_SERVER=true
- CLUSTER_MODE=frontend
- TIME_ZONE=Asia/Shanghai # Optional; default is UTC. Set this to your local time zone.
```
**Note**: **CLUSTER_SERVER=true** means seafile cluster mode, **CLUSTER_MODE=frontend** means this node is seafile frontend server.
Start the seafile docker container
```
$ cd /opt/seafile
$ docker-compose up -d
```
#### Initial configuration files
1\. Manually generate configuration files
```
$ docker exec -it seafile bash
# cd /scripts && ./cluster_conf_init.py
# cd /opt/seafile/conf
```
2\. Modify the mysql configuration options (user, host, password) in configuration files such as ccnet.conf, seafevents.conf, seafile.conf and seahub_settings.py.
3\. Modify the memcached configuration option in seahub_settings.py
```
CACHES = {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
'LOCATION': 'memcached:11211',
},
...
}
|
v
CACHES = {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
'LOCATION': '{your backend node IP}:11211',
},
...
}
```
4\. Modify the \[INDEX FILES] configuration option (es_host) in seafevents.conf
```
[INDEX FILES]
es_port = 9200
es_host = {your backend node IP}
external_es_server = true
enabled = true
interval = 10m
...
```
5\. Add some configurations in seahub_settings.py
```python
SERVICE_URL = 'http{s}://{your backend node IP or your sitename}/'
FILE_SERVER_ROOT = 'http{s}://{your backend node IP or your sitename}/seafhttp'
AVATAR_FILE_STORAGE = 'seahub.base.database_storage.DatabaseStorage'
```
6\. Add cluster special configuration in seafile.conf
```
[cluster]
enabled = true
memcached_options = --SERVER={your backend node IP} --POOL-MIN=10 --POOL-MAX=100
```
Start Seafile service
```
$ docker exec -it seafile bash
# cd /opt/seafile/seafile-server-latest
# ./seafile.sh start && ./seahub.sh start
```
When you start it for the first time, seafile will guide you to set up an admin user.
When deploying the second frontend node, you can directly copy all the directories generated by the first frontend node, including the docker-compose.yml file and modified configuration files, and then start the seafile docker container.
### Deploy seafile backend node
Create the mount directory
```
$ mkdir -p /opt/seafile/shared
```
Create the docker-compose.yml file
```
$ cd /opt/seafile
$ vim docker-compose.yml
```
```
version: '2.0'
services:
seafile:
image: docker.seafile.top/seafileltd/seafile-pro-mc:9.0.2
container_name: seafile
ports:
- 80:80
volumes:
- /opt/seafile/shared:/shared
environment:
- CLUSTER_SERVER=true
- CLUSTER_MODE=backend
- TIME_ZONE=Asia/Shanghai # Optional; default is UTC. Set this to your local time zone.
```
**Note**: **CLUSTER_SERVER=true** means seafile cluster mode, **CLUSTER_MODE=backend** means this node is seafile backend server.
Start the seafile docker container
```
$ cd /opt/seafile
$ docker-compose up -d
```
Copy configuration files of the frontend node, and then start Seafile server of the backend node
```
$ docker exec -it seafile bash
# cd /opt/seafile/seafile-server-latest
# ./seafile.sh start && ./seafile-background-tasks.sh start
```
### Use S3 as backend storage
Modify the seafile.conf file on each node to configure S3 storage.
vim seafile.conf
```
[commit_object_backend]
name = s3
bucket = {your-commit-objects} # The bucket name can only use lowercase letters, numbers, and dashes
key_id = {your-key-id}
key = {your-secret-key}
use_v4_signature = true
aws_region = eu-central-1 # eu-central-1 for Frankfurt region
[fs_object_backend]
name = s3
bucket = {your-fs-objects}
key_id = {your-key-id}
key = {your-secret-key}
use_v4_signature = true
aws_region = eu-central-1
[block_backend]
name = s3
bucket = {your-block-objects}
key_id = {your-key-id}
key = {your-secret-key}
use_v4_signature = true
aws_region = eu-central-1
```
### Deployment load balance
#### Install HAproxy and Keepalived services
Execute the following commands on the two Seafile frontend servers:
```
$ apt install haproxy keepalived -y
$ mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
$ cat > /etc/haproxy/haproxy.cfg << 'EOF'
global
log 127.0.0.1 local1 notice
maxconn 4096
user haproxy
group haproxy
defaults
log global
mode http
retries 3
timeout connect 10000
timeout client 300000
timeout server 300000
listen seafile 0.0.0.0:80
mode http
option httplog
option dontlognull
option forwardfor
cookie SERVERID insert indirect nocache
server seafile01 Front-End01-IP:8001 check port 11001 cookie seafile01
server seafile02 Front-End02-IP:8001 check port 11001 cookie seafile02
EOF
```
**Note**: Correctly modify the IP address (Front-End01-IP and Front-End02-IP) of the front-end server in the above configuration file.
**Choose one of the above two servers as the master node, and the other as the slave node.**
Perform the following operations on the master node:
```bash
$ cat > /etc/keepalived/keepalived.conf << 'EOF'
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalived@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id node1
vrrp_mcast_group4 224.0.100.18
}
vrrp_instance VI_1 {
state MASTER
interface eno1 # Set to the device name of a valid network interface on the current server, and the virtual IP will be bound to the network interface
virtual_router_id 50
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass seafile123
}
virtual_ipaddress {
172.26.154.45/24 dev eno1 # Configure to the correct virtual IP and network interface device name
}
}
EOF
```
**Note:** Correctly configure the virtual IP address and network interface device name in the above file.
Perform the following operations on the standby node:
```bash
$ cat > /etc/keepalived/keepalived.conf << 'EOF'
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalived@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id node2
vrrp_mcast_group4 224.0.100.18
}
vrrp_instance VI_1 {
state BACKUP
interface eno1 # Set to the device name of a valid network interface on the current server, and the virtual IP will be bound to the network interface
virtual_router_id 50
priority 98
advert_int 1
authentication {
auth_type PASS
auth_pass seafile123
}
virtual_ipaddress {
172.26.154.45/24 dev eno1 # Configure to the correct virtual IP and network interface device name
}
}
EOF
```
Finally, run the following commands on the two Seafile frontend servers to start the corresponding services:
```
$ systemctl enable --now haproxy
$ systemctl enable --now keepalived
```
So far, Seafile cluster has been deployed.

View File

@ -121,6 +121,7 @@ nav:
- Seafile Setup with Docker:
- Seafile Community Installation: docker/deploy_seafile_with_docker.md
- Seafile Professional Installation: docker/pro-edition/deploy_seafile_pro_with_docker.md
- Seafile Docker Cluster Deployment: docker/cluster/deploy_seafile_cluster_with_docker.md
- Migration from Seafile Community: docker/pro-edition/migrate_ce_to_pro_with_docker.md
- Migrate from non-docker deployment: docker/non_docker_to_docker.md
- Upgrade from 6.3 to 7.0: