Jumpserver 是一个开源的堡垒机(也称为跳板机)系统,用于管理和审计服务器访问。它提供安全的登录入口,用户需要通过 Jumpserver 访问目标服务器,从而确保所有访问行为都被记录和监控。
关键特点包括:
- 权限控制:细粒度的用户权限管理,确保只有授权用户才能访问特定资源。
- 操作审计:记录用户的操作行为,便于追踪和审计。
- 多协议支持:支持 SSH、RDP 等多种协议,适用于不同类型的服务器。
- 便捷管理:提供图形化管理界面,简化服务器和用户的管理。
太简单,略过
主机名 | IP | 任务 |
---|---|---|
jump-basicserver | 192.168.31.66 | NFS、Mysql |
jump01 | 192.168.31.67 | jumpserver |
jump02 | 192.168.31.68 | jumpserver |
jump03 | 192.168.31.69 | jumpserver |
jump04 | 192.168.31.70 | jumpserver |
HAproxyjump | 192.168.31.140 | LoadBalance |
minIOjump | 192.168.31.141 | Ceph |
ElasticSearchjump | 192.168.31.142 | ElasticSearch |
软件要求 |
---|
Mysql>=5.7 |
MariaDB>=10.2 |
Redis>=5 |
机器 | 硬件要求 |
---|---|
Ha、jumpserver | 2C 2G 50G |
basicserver | 4C 4G 50G |
HAproxy | 2C 2G 50G |
MinIO | 2C 2G 50G |
ElasticSearch⚠ | 4C 4G 50G |
基本的防火墙,selinux,请关闭! ⚠ 生产环境除外
配置基础的阿里yum源
https://developer.aliyun.com/mirror/centos?spm=a2c6h.13651102.0.0.3e221b11aMW5oE
####jumpbasicserver
#NFS
yum -y install epel-release
yum makecache fast
yum install -y nfs-utils rpcbind
systemctl enable --now rpcbind nfs-server nfs-lock nfs-idmap
mkdir /data
chmod -R 777 /data
vi /etc/exports
/data 192.168.31.*(rw,sync,all_squash,anonuid=0,anongid=0)
exportfs -a
#MySQL (下面安装的是 MySQL Community Server 5.7,并非 MariaDB)
yum -y localinstall http://mirrors.ustc.edu.cn/mysql-repo/mysql57-community-release-el7.rpm
rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
yum install mysql-community-server -y
if [ ! "$(cat /usr/bin/mysqld_pre_systemd | grep -v ^\# | grep initialize-insecure )" ]; then sed -i "s@--initialize @--initialize-insecure @g" /usr/bin/mysqld_pre_systemd; fi
systemctl enable --now mysqld
mysql -uroot
create database jumpserver default charset 'utf8';
set global validate_password_policy=LOW;
create user 'jumpserver'@'%' identified by '12345678';
grant all on jumpserver.* to 'jumpserver'@'%';
flush privileges;
#Redis
wget http://download.redis.io/releases/redis-5.0.0.tar.gz
tar -xf redis-5.0.0.tar.gz -C /opt/
cd /opt/redis-5.0.0
make
make install PREFIX=/usr/local/redis
cd /usr/local/redis/bin/
cp /opt/redis-5.0.0/redis.conf .
vim redis.conf
bind 0.0.0.0
protected-mode no
daemonize yes
maxmemory-policy allkeys-lru
requirepass 12345678
./redis-server ./redis.conf
ps aux|grep redis
vim /etc/systemd/system/redis.service
# /etc/systemd/system/redis.service
[Unit]
Description=redis-server
After=network.target
[Service]
Type=forking
ExecStart=/usr/local/redis/bin/redis-server /usr/local/redis/bin/redis.conf
PrivateTmp=true
[Install]
WantedBy=multi-user.target
systemctl daemon-reload
pkill redis
systemctl enable --now redis
###jump01
mkdir -p /opt/jumpserver/core/data
mount -t nfs 192.168.31.66:/data /opt/jumpserver/core/data
echo "192.168.31.66:/data /opt/jumpserver/core/data nfs defaults 0 0" >> /etc/fstab
cd /opt/
wget http://github.com/jumpserver/installer/releases/download/v2.28.8/jumpserver-installer-v2.28.8.tar.gz
tar -xf jumpserver-installer-v2.28.8.tar.gz
#修改配置
cd jumpserver-installer-v2.28.8/
vi config-example.txt
VOLUME_DIR=/opt/jumpserver
SECRET_KEY=kWQdmdCQKjaWlHYpPhkNQDkfaRulM6YnHctsHLlSPs8287o2kW
BOOTSTRAP_TOKEN=KXOeyNgDeTdpeu9q
DB_HOST=192.168.31.66
DB_PORT=3306
DB_USER=jumpserver
DB_PASSWORD=12345678
DB_NAME=jumpserver
USE_EXTERNAL_REDIS=1
REDIS_HOST=192.168.31.66
REDIS_PORT=6379
REDIS_PASSWORD=12345678
################################# KoKo 配置 #################################
SHARE_ROOM_TYPE=redis
REUSE_CONNECTION=False
#安装
./jmsctl.sh install
###jump02
mkdir -p /opt/jumpserver/core/data
mount -t nfs 192.168.31.66:/data /opt/jumpserver/core/data
echo "192.168.31.66:/data /opt/jumpserver/core/data nfs defaults 0 0" >> /etc/fstab
cd /opt/
wget http://github.com/jumpserver/installer/releases/download/v2.28.8/jumpserver-installer-v2.28.8.tar.gz
tar -xf jumpserver-installer-v2.28.8.tar.gz
#修改配置
cd jumpserver-installer-v2.28.8/
vi config-example.txt
VOLUME_DIR=/opt/jumpserver
SECRET_KEY=kWQdmdCQKjaWlHYpPhkNQDkfaRulM6YnHctsHLlSPs8287o2kW
BOOTSTRAP_TOKEN=KXOeyNgDeTdpeu9q
DB_HOST=192.168.31.66
DB_PORT=3306
DB_USER=jumpserver
DB_PASSWORD=12345678
DB_NAME=jumpserver
USE_EXTERNAL_REDIS=1
REDIS_HOST=192.168.31.66
REDIS_PORT=6379
REDIS_PASSWORD=12345678
################################## KoKo配置 ##################################
SHARE_ROOM_TYPE=redis
REUSE_CONNECTION=False
#安装
./jmsctl.sh install
###jump03
mkdir -p /opt/jumpserver/core/data
mount -t nfs 192.168.31.66:/data /opt/jumpserver/core/data
echo "192.168.31.66:/data /opt/jumpserver/core/data nfs defaults 0 0" >> /etc/fstab
cd /opt/
wget http://github.com/jumpserver/installer/releases/download/v2.28.8/jumpserver-installer-v2.28.8.tar.gz
tar -xf jumpserver-installer-v2.28.8.tar.gz
#修改配置
cd jumpserver-installer-v2.28.8/
vi config-example.txt
VOLUME_DIR=/opt/jumpserver
SECRET_KEY=kWQdmdCQKjaWlHYpPhkNQDkfaRulM6YnHctsHLlSPs8287o2kW
BOOTSTRAP_TOKEN=KXOeyNgDeTdpeu9q
DB_HOST=192.168.31.66
DB_PORT=3306
DB_USER=jumpserver
DB_PASSWORD=12345678
DB_NAME=jumpserver
USE_EXTERNAL_REDIS=1
REDIS_HOST=192.168.31.66
REDIS_PORT=6379
REDIS_PASSWORD=12345678
################################# KoKo 配置 #################################
SHARE_ROOM_TYPE=redis
REUSE_CONNECTION=False
#安装
./jmsctl.sh install
###jump04
mkdir -p /opt/jumpserver/core/data
mount -t nfs 192.168.31.66:/data /opt/jumpserver/core/data
echo "192.168.31.66:/data /opt/jumpserver/core/data nfs defaults 0 0" >> /etc/fstab
cd /opt/
wget http://github.com/jumpserver/installer/releases/download/v2.28.8/jumpserver-installer-v2.28.8.tar.gz
tar -xf jumpserver-installer-v2.28.8.tar.gz
#修改配置
cd jumpserver-installer-v2.28.8/
vi config-example.txt
VOLUME_DIR=/opt/jumpserver
SECRET_KEY=kWQdmdCQKjaWlHYpPhkNQDkfaRulM6YnHctsHLlSPs8287o2kW
BOOTSTRAP_TOKEN=KXOeyNgDeTdpeu9q
DB_HOST=192.168.31.66
DB_PORT=3306
DB_USER=jumpserver
DB_PASSWORD=12345678
DB_NAME=jumpserver
USE_EXTERNAL_REDIS=1
REDIS_HOST=192.168.31.66
REDIS_PORT=6379
REDIS_PASSWORD=12345678
################################# KoKo 配置 #################################
SHARE_ROOM_TYPE=redis
REUSE_CONNECTION=False
#安装
./jmsctl.sh install
###HAproxyjump
yum -y install epel-release
yum install -y haproxy
#修改配置
vim /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
listen stats
bind *:8080
mode http
stats enable
stats uri /haproxy
stats refresh 5s
stats realm haproxy-status
stats auth admin:12345678
#---------------------------------------------------------------------
# check 检活参数说明
# inter 间隔时间, 单位: 毫秒
# rise 连续成功的次数, 单位: 次
# fall 连续失败的次数, 单位: 次
# 例: inter 2s rise 2 fall 3
# 表示 2 秒检查一次状态, 连续成功 2 次服务正常, 连续失败 3 次服务异常
#
# server 服务参数说明
# server 192.168.31.67 192.168.31.67:80 weight 1 cookie web01
# 第一个 192.168.31.67 做为页面展示的标识, 可以修改为其他任意字符串
# 第二个 192.168.31.67:80 是实际的后端服务端口
# weight 为权重, 多节点时安装权重进行负载均衡
# cookie 用户侧的 cookie 会包含此标识, 便于区分当前访问的后端节点
# 例: server db01 192.168.31.67:3306 weight 1 cookie db_01
#---------------------------------------------------------------------
listen jms-web
bind *:80
mode http
# redirect scheme https if !{ ssl_fc } # 重定向到 https
# bind *:443 ssl crt /opt/ssl.pem # https 设置
option httpclose
option forwardfor
option httpchk GET /api/health/ # Core 检活接口
cookie SERVERID insert indirect
hash-type consistent
fullconn 500
balance leastconn
server 192.168.31.67 192.168.31.67:80 weight 1 cookie web01 check inter 2s rise 2 fall 3 # JumpServer 服务器
server 192.168.31.68 192.168.31.68:80 weight 1 cookie web02 check inter 2s rise 2 fall 3
server 192.168.31.69 192.168.31.69:80 weight 1 cookie web03 check inter 2s rise 2 fall 3
server 192.168.31.70 192.168.31.70:80 weight 1 cookie web04 check inter 2s rise 2 fall 3
listen jms-ssh
bind *:2222
mode tcp
option tcp-check
fullconn 500
balance source
server 192.168.31.67 192.168.31.67:2222 weight 1 check inter 2s rise 2 fall 3 send-proxy
server 192.168.31.68 192.168.31.68:2222 weight 1 check inter 2s rise 2 fall 3 send-proxy
server 192.168.31.69 192.168.31.69:2222 weight 1 check inter 2s rise 2 fall 3 send-proxy
server 192.168.31.70 192.168.31.70:2222 weight 1 check inter 2s rise 2 fall 3 send-proxy
listen jms-koko
mode http
option httpclose
option forwardfor
option httpchk GET /koko/health/ HTTP/1.1\r\nHost:\ 192.168.31.140 # KoKo 检活接口, host 填写 HAProxy 的 ip 地址
cookie SERVERID insert indirect
hash-type consistent
fullconn 500
balance leastconn
server 192.168.31.67 192.168.31.67:80 weight 1 cookie web01 check inter 2s rise 2 fall 3
server 192.168.31.68 192.168.31.68:80 weight 1 cookie web02 check inter 2s rise 2 fall 3
server 192.168.31.69 192.168.31.69:80 weight 1 cookie web03 check inter 2s rise 2 fall 3
server 192.168.31.70 192.168.31.70:80 weight 1 cookie web04 check inter 2s rise 2 fall 3
listen jms-lion
mode http
option httpclose
option forwardfor
option httpchk GET /lion/health/ HTTP/1.1\r\nHost:\ 192.168.31.140 # Lion 检活接口, host 填写 HAProxy 的 ip 地址
cookie SERVERID insert indirect
hash-type consistent
fullconn 500
balance leastconn
server 192.168.31.67 192.168.31.67:80 weight 1 cookie web01 check inter 2s rise 2 fall 3
server 192.168.31.68 192.168.31.68:80 weight 1 cookie web02 check inter 2s rise 2 fall 3
server 192.168.31.69 192.168.31.69:80 weight 1 cookie web03 check inter 2s rise 2 fall 3
server 192.168.31.70 192.168.31.70:80 weight 1 cookie web04 check inter 2s rise 2 fall 3
listen jms-magnus
bind *:30000
mode tcp
option tcp-check
fullconn 500
balance source
server 192.168.31.67 192.168.31.67:30000 weight 1 check inter 2s rise 2 fall 3 send-proxy
server 192.168.31.68 192.168.31.68:30000 weight 1 check inter 2s rise 2 fall 3 send-proxy
server 192.168.31.69 192.168.31.69:30000 weight 1 check inter 2s rise 2 fall 3 send-proxy
server 192.168.31.70 192.168.31.70:30000 weight 1 check inter 2s rise 2 fall 3 send-proxy
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main *:5000
acl url_static path_beg -i /static /images /javascript /stylesheets
acl url_static path_end -i .jpg .gif .png .css .js
use_backend static if url_static
default_backend app
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
balance roundrobin
server static 127.0.0.1:4331 check
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
balance roundrobin
server app1 127.0.0.1:5001 check
server app2 127.0.0.1:5002 check
server app3 127.0.0.1:5003 check
server app4 127.0.0.1:5004 check
#测试
访问 http://192.168.31.140:8080/haproxy
访问 http://192.168.31.140
关闭 jump01,02服务
访问 http://192.168.31.140
###miniojump
#安装docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum makecache fast
yum -y install docker-ce
#配置docker
vi /etc/docker/daemon.json
{
"live-restore": true,
"registry-mirrors": ["https://hub-mirror.c.163.com", "https://bmtrgdvx.mirror.aliyuncs.com", "http://f1361db2.m.daocloud.io"],
"log-driver": "json-file",
"log-opts": {"max-file": "3", "max-size": "10m"}
}
#启动
systemctl enable docker
systemctl start docker
#minio镜像
docker pull minio/minio:latest
#持久化本地数据目录
mkdir -p /opt/jumpserver/minio/{data,config}
#创建容器
## 请自行修改账号密码并牢记,丢失后可以删掉容器后重新用新密码创建,数据不会丢失
# 9000 # api 访问端口
# 9001 # console 访问端口
# MINIO_ROOT_USER=minio # minio 账号
# MINIO_ROOT_PASSWORD=12345678 # minio 密码
docker run --name jms_minio -d -p 9000:9000 -p 9001:9001 -e MINIO_ROOT_USER=minio -e MINIO_ROOT_PASSWORD=12345678 -v /opt/jumpserver/minio/data:/data -v /opt/jumpserver/minio/config:/root/.minio --restart=always minio/minio:latest server /data --console-address ":9001"
#浏览器访问
http://192.168.31.141:9000
登录 ------> Buckets ------> Create Bucket -----> jumpserver
登录 ------> 右上角齿轮 ------> 终端设置 -----> 录像存储 ------> 创建Ceph
选项 值 说明 名称 jms-minio 标识, 不可重复 桶名称 jumpserver Bucket Name AK minio MINIO_ROOT_USER SK 12345678 MINIO_ROOT_PASSWORD 端点 http://192.168.31.141:9000 minio 服务访问地址 默认存储 yes 新组件将自动使用该存储
###elasticsearchjump
#安装docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum makecache fast
yum -y install docker-ce
#配置docker
vi /etc/docker/daemon.json
{
"live-restore": true,
"registry-mirrors": ["https://hub-mirror.c.163.com", "https://bmtrgdvx.mirror.aliyuncs.com", "http://f1361db2.m.daocloud.io"],
"log-driver": "json-file",
"log-opts": {"max-file": "3", "max-size": "10m"}
}
#启动
systemctl enable docker
systemctl start docker
#下载镜像 elasticsearch的镜像版本和jumpserver版本有关联
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.17.6
#持久化本地数据目录
mkdir -p /opt/jumpserver/elasticsearch/data /opt/jumpserver/elasticsearch/logs
#创建容器
## 请自行修改账号密码并牢记,丢失后可以删掉容器后重新用新密码创建,数据不会丢失
# 9200 # Web 访问端口
# 9300 # 集群通信
# discovery.type=single-node # 单节点
# bootstrap.memory_lock="true" # 锁定物理内存, 不使用 swap
# xpack.security.enabled="true" # 开启安全模块
# TAKE_FILE_OWNERSHIP="true" # 自动修改挂载文件夹的所属用户
# ES_JAVA_OPTS="-Xms512m -Xmx512m" # JVM 内存大小, 推荐设置为主机内存的一半
# elastic # Elasticsearch 账号
# ELASTIC_PASSWORD=12345678 # Elasticsearch 密码
docker run --name jms_es -d -p 9200:9200 -p 9300:9300 -e cluster.name=docker-cluster -e discovery.type=single-node -e network.host=0.0.0.0 -e bootstrap.memory_lock="true" -e xpack.security.enabled="true" -e TAKE_FILE_OWNERSHIP="true" -e ES_JAVA_OPTS="-Xms512m -Xmx512m" -e ELASTIC_PASSWORD=12345678 -v /opt/jumpserver/elasticsearch/data:/usr/share/elasticsearch/data -v /opt/jumpserver/elasticsearch/logs:/usr/share/elasticsearch/logs --restart=always docker.elastic.co/elasticsearch/elasticsearch:7.17.6
登录 ------> 右上角齿轮 ------> 终端设置 -----> 命令存储 ------> ElasticSearch
选项 值 说明 名称 jms-es 标识, 不可重复 主机 http://elastic:12345678@192.168.31.142:9200 http://es_host:es_port 默认存储 yes 新组件将自动使用该存储 ⚠️ 如果这台机器的配置不够,将会报错==无效的ElasticSearch配置==
Lobe Chat 是一个基于 Kubernetes 部署的聊天应用程序,使用 PostgreSQL 作为数据库,并集成了 OpenAI 和 DeepSeek 等第三方 API。应用程序通过 Ingress 暴露服务,支持 HTTPS 访问。
在开始部署之前,请确保以下环境和工具已准备就绪:
chart.jiufog.space
)和对应的 TLS 证书。强制,因为与Logto有关。将以下 Manifest 文件保存为 lobe-deployment.yaml
,并根据实际情况替换 {{ }}
中的内容。
apiVersion: apps/v1
kind: Deployment
metadata:
name: lobe-chat
namespace: dev
spec:
replicas: 1
selector:
matchLabels:
app: lobe-chat
template:
metadata:
labels:
app: lobe-chat
spec:
containers:
- name: lobe-chat-container
image: lobehub/lobe-chat-database:latest
imagePullPolicy: Always
ports:
- containerPort: 3210
env:
- name: ENABLED_OPENAI
value: "0"
- name: SILICONCLOUD_API_KEY
value: "{{ YOUR_SILICONCLOUD_API_KEY }}" # 替换为实际的 SiliconCloud API 密钥
- name: DATABASE_URL
value: "{{ YOUR_DATABASE_URL }}" # 替换为实际的 PostgreSQL 连接字符串
- name: S3_ACCESS_KEY_ID
value: "{{ YOUR_S3_ACCESS_KEY_ID }}" # 替换为实际的 S3 Access Key ID
- name: S3_SECRET_ACCESS_KEY
value: "{{ YOUR_S3_SECRET_ACCESS_KEY }}" # 替换为实际的 S3 Secret Access Key
- name: S3_ENDPOINT
value: "{{ YOUR_S3_ENDPOINT }}" # 替换为实际的 S3 Endpoint
- name: S3_PUBLIC_DOMAIN
value: "{{ YOUR_S3_PUBLIC_DOMAIN }}" # 替换为实际的 S3 Public Domain
- name: S3_ENABLE_PATH_STYLE
value: "1"
- name: S3_BUCKET
value: "{{ YOUR_S3_BUCKET }}" # 替换为实际的 S3 Bucket 名称
- name: APP_URL
value: "{{ YOUR_APP_URL }}" # 替换为你的域名
- name: KEY_VAULTS_SECRET
value: "{{ YOUR_KEY_VAULTS_SECRET }}" # 替换为实际的 Key Vaults Secret
- name: NEXT_AUTH_SECRET
value: "{{ YOUR_NEXT_AUTH_SECRET }}" # 替换为实际的 Next Auth Secret
- name: NEXTAUTH_URL
value: "{{ YOUR_NEXTAUTH_URL }}" # 替换为实际的 NextAuth URL
- name: NEXT_AUTH_SSO_PROVIDERS
value: logto # 采用的logto作为用户管理
- name: AUTH_LOGTO_ID
value: "{{ YOUR_AUTH_LOGTO_ID }}" # 替换为实际的 Logto ID
- name: AUTH_LOGTO_SECRET
value: "{{ YOUR_AUTH_LOGTO_SECRET }}" # 替换为实际的 Logto Secret
- name: AUTH_LOGTO_ISSUER
value: "{{ YOUR_AUTH_LOGTO_ISSUER }}" # 替换为实际的 Logto Issuer
- name: LOGTO_WEBHOOK_SIGNING_KEY
value: "{{ YOUR_LOGTO_WEBHOOK_SIGNING_KEY }}" # 替换为实际的 Logto Webhook Signing Key
- name: DEFAULT_FILES_CONFIG
value: embedding_model=siliconcloud/BAAI/bge-m3 # 向量分析使用的模型
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ai-ingress
namespace: dev
spec:
ingressClassName: nginx
rules:
- host: "{{ YOUR_HOST }}" # 替换为实际的域名
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ai-chat
port:
number: 80
tls:
- hosts:
- "{{ YOUR_HOST }}" # 替换为实际的域名
secretName: "{{ YOUR_TLS_SECRET_NAME }}" # 替换为实际的 TLS Secret 名称
---
apiVersion: v1
kind: Service
metadata:
name: ai-chat
namespace: dev
spec:
selector:
app: lobe-chat
ports:
- protocol: TCP
port: 80
targetPort: 3210
选择硅基流动API(避免deepseek的API服务器性能瓶颈,还会额外赠送14元的体验金):
https://cloud.siliconflow.cn/i/frVugSQo
硅基流动API接入文档:
如果 dev
Namespace 尚未创建,请先创建:
kubectl create namespace dev
使用 kubectl
部署应用程序:
kubectl apply -f lobe-deployment.yaml
检查 Deployment、Service 和 Ingress 是否成功创建:
kubectl -n dev get deployment,service,ingress
确保 Pod 已成功启动并处于 Running
状态:
kubectl -n dev get pods
通过浏览器访问 https://{{ YOUR_HOST }}
,确保应用程序能够正常加载。
如果应用程序无法正常工作,可以查看 Pod 日志以排查问题:
kubectl -n dev logs <pod-name>
根据需要调整 Deployment 的副本数:
kubectl -n dev scale deployment lobe-chat --replicas=3
如果需要更新应用程序镜像,可以修改 image
字段并重新应用 Manifest:
kubectl apply -f lobe-deployment.yaml
建议配置 Prometheus 和 Grafana 对应用程序进行监控,重点关注以下指标:
kubectl logs
查看 Pod 日志以获取更多信息。DATABASE_URL
的值正确。以下是需要用户根据实际情况替换的内容:
{{ YOUR_SILICONCLOUD_API_KEY }}
:替换为实际的 SiliconCloud API 密钥。{{ YOUR_DATABASE_URL }}
:替换为实际的 PostgreSQL 连接字符串。{{ YOUR_S3_ACCESS_KEY_ID }}
、{{ YOUR_S3_SECRET_ACCESS_KEY }}
、{{ YOUR_S3_ENDPOINT }}
、{{ YOUR_S3_BUCKET }}
:替换为实际的 S3 存储配置。{{ YOUR_HOST }}
:替换为实际的域名。{{ YOUR_TLS_SECRET_NAME }}
:替换为实际的 TLS 证书 Secret 名称。{{ YOUR_APP_URL }}
:替换为实际的应用程序 URL。{{ YOUR_KEY_VAULTS_SECRET }}
:替换为实际的 Key Vaults Secret。{{ YOUR_NEXT_AUTH_SECRET }}
:替换为实际的 Next Auth Secret。{{ YOUR_NEXTAUTH_URL }}
:替换为实际的 NextAuth URL。{{ YOUR_AUTH_LOGTO_ID }}
:替换为实际的 Logto ID。{{ YOUR_AUTH_LOGTO_SECRET }}
:替换为实际的 Logto Secret。{{ YOUR_AUTH_LOGTO_ISSUER }}
:替换为实际的 Logto Issuer。{{ YOUR_LOGTO_WEBHOOK_SIGNING_KEY }}
:替换为实际的 Logto Webhook Signing Key。选择硅基流动API(避免deepseek的API服务器性能瓶颈,还会额外赠送14元的体验金):
https://cloud.siliconflow.cn/i/frVugSQo
硅基流动API接入文档:
Logto 是一个基于 Kubernetes 部署的身份认证和用户管理平台。它使用 PostgreSQL 作为数据库,并通过 Ingress 暴露服务,支持 HTTPS 访问。Logto 提供了两个主要服务:
3001
。3002
。在开始部署之前,请确保以下环境和工具已准备就绪:
logto.jiufog.space
和 logto-admin.jiufog.space
)和对应的 TLS 证书。强制ReadWriteMany
访问模式的存储类(如 NFS)。将以下 Manifest 文件保存到本地,并根据实际情况替换 {{ }}
中的内容。
logto-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: logto
namespace: dev
spec:
replicas: 1
selector:
matchLabels:
app: logto
template:
metadata:
labels:
app: logto
spec:
initContainers:
- image: ghcr.io/logto-io/logto
command:
- /bin/sh
args:
- '-c'
- 'npm run cli connector add -- --official'
name: init
volumeMounts:
- name: logto-storage
mountPath: /etc/logto/packages/core/connectors
env:
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: "1"
- name: TRUST_PROXY_HEADER
value: "true"
- name: DB_URL
value: "{{ YOUR_DB_URL }}" # 替换为实际的 PostgreSQL 连接字符串
- name: ENDPOINT
value: "{{ YOUR_ENDPOINT }}" # 替换为实际的 Auth 服务端点
- name: ADMIN_ENDPOINT
value: "{{ YOUR_ADMIN_ENDPOINT }}" # 替换为实际的 Admin 服务端点
containers:
- name: logto
image: ghcr.io/logto-io/logto
volumeMounts:
- name: logto-storage
mountPath: /etc/logto/packages/core/connectors
env:
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: "1"
- name: TRUST_PROXY_HEADER
value: "true"
- name: DB_URL
value: "{{ YOUR_DB_URL }}" # 替换为实际的 PostgreSQL 连接字符串
- name: ENDPOINT
value: "{{ YOUR_ENDPOINT }}" # 替换为实际的 Auth 服务端点
- name: ADMIN_ENDPOINT
value: "{{ YOUR_ADMIN_ENDPOINT }}" # 替换为实际的 Admin 服务端点
command: ["sh", "-c", "npm run cli db seed -- --swe && npm start"]
volumes:
- name: logto-storage
persistentVolumeClaim:
claimName: logto-pvc
logto-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: logto-service
namespace: dev
labels:
app: logto
spec:
selector:
app: logto
ports:
- name: auth
port: 3001
- name: admin
port: 3002
type: ClusterIP
logto-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: logto-pvc
namespace: dev
spec:
storageClassName: "{{ YOUR_STORAGE_CLASS }}" # 替换为实际的存储类名称
accessModes:
- ReadWriteMany
resources:
requests:
storage: 5Gi
logto-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: logto-ingress
namespace: dev
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
ingressClassName: nginx
rules:
- host: "{{ YOUR_AUTH_HOST }}" # 替换为实际的 Auth 服务域名
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: logto-service
port:
number: 3001
tls:
- hosts:
- "{{ YOUR_AUTH_HOST }}" # 替换为实际的 Auth 服务域名
secretName: "{{ YOUR_AUTH_TLS_SECRET }}" # 替换为实际的 TLS Secret 名称
logto-admin-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: logto-ingress-admin
namespace: dev
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
ingressClassName: nginx
rules:
- host: "{{ YOUR_ADMIN_HOST }}" # 替换为实际的 Admin 服务域名
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: logto-service
port:
number: 3002
tls:
- hosts:
- "{{ YOUR_ADMIN_HOST }}" # 替换为实际的 Admin 服务域名
secretName: "{{ YOUR_ADMIN_TLS_SECRET }}" # 替换为实际的 TLS Secret 名称
如果 dev
Namespace 尚未创建,请先创建:
kubectl create namespace dev
使用 kubectl
部署应用程序:
kubectl apply -f logto-deployment.yaml
kubectl apply -f logto-svc.yaml
kubectl apply -f logto-pvc.yaml
kubectl apply -f logto-ingress.yaml
kubectl apply -f logto-admin-ingress.yaml
检查 Deployment、Service、PVC 和 Ingress 是否成功创建:
kubectl -n dev get deployment,service,pvc,ingress
确保 服务 已成功启动并处于 Running
状态:
kubectl -n dev get pods,svc,ingress
通过浏览器访问以下地址,确保应用程序能够正常加载:
https://{{ YOUR_AUTH_HOST }}
https://{{ YOUR_ADMIN_HOST }}
如果应用程序无法正常工作,可以查看 Pod 日志以排查问题:
kubectl -n dev logs <pod-name>
根据需要调整 Deployment 的副本数:
kubectl -n dev scale deployment logto --replicas=3
如果需要更新应用程序镜像,可以修改 image
字段并重新应用 Manifest:
kubectl apply -f logto-deployment.yaml
建议配置 Prometheus 和 Grafana 对应用程序进行监控,重点关注以下指标:
kubectl logs
查看 Pod 日志以获取更多信息。DB_URL
的值正确。以下是需要用户根据实际情况替换的内容:
{{ YOUR_DB_URL }}
:替换为实际的 PostgreSQL 连接字符串。{{ YOUR_ENDPOINT }}
:替换为实际的 Auth 服务端点。{{ YOUR_ADMIN_ENDPOINT }}
:替换为实际的 Admin 服务端点。{{ YOUR_STORAGE_CLASS }}
:替换为实际的存储类名称。{{ YOUR_AUTH_HOST }}
:替换为实际的 Auth 服务域名。{{ YOUR_ADMIN_HOST }}
:替换为实际的 Admin 服务域名。{{ YOUR_AUTH_TLS_SECRET }}
:替换为实际的 Auth 服务 TLS 证书 Secret 名称。{{ YOUR_ADMIN_TLS_SECRET }}
:替换为实际的 Admin 服务 TLS 证书 Secret 名称。效果图:
PostgreSQL 是一个功能强大的开源关系型数据库,支持扩展插件。pgvector 是一个用于向量相似性搜索的 PostgreSQL 插件,适用于机器学习、推荐系统等场景。本文档描述了如何在 Kubernetes 中部署带有 pgvector 插件的 PostgreSQL 数据库。
在开始部署之前,请确保以下环境和工具已准备就绪:
ReadWriteOnce
或 ReadWriteMany
访问模式的存储类。将以下 Manifest 文件保存到本地,并根据实际情况替换 {{ }}
中的内容。
pgsql.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: postgresql-deployment
namespace: dev
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: postgresql
template:
metadata:
labels:
app: postgresql
spec:
containers:
- image: pgvector/pgvector:pg16 # 使用 pgvector 插件的 PostgreSQL 镜像
name: postgresql
env:
- name: POSTGRES_PASSWORD
value: "{{ YOUR_POSTGRES_PASSWORD }}" # 替换为实际的 PostgreSQL 密码
ports:
- containerPort: 5432
name: postgresql
volumeMounts:
- name: postgresql-persistent-storage
mountPath: /var/lib/postgresql/data
volumes:
- name: postgresql-persistent-storage
persistentVolumeClaim:
claimName: postgresql-pv-claim
---
apiVersion: v1
kind: Service
metadata:
name: postgresql-client-service
namespace: dev
labels:
app: postgresql
spec:
type: ClusterIP
ports:
- port: 5432
targetPort: 5432
protocol: TCP
selector:
app: postgresql
pgsql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgresql-pv-claim
namespace: dev
spec:
storageClassName: "{{ YOUR_STORAGE_CLASS }}" # 替换为实际的存储类名称
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi # 根据实际需求调整存储大小
如果 dev
Namespace 尚未创建,请先创建:
kubectl create namespace dev
使用 kubectl
部署 PostgreSQL:
kubectl apply -f pgsql.yaml
kubectl apply -f pgsql-pvc.yaml
检查 Deployment、Service 和 PVC 是否成功创建:
kubectl -n dev get deployment,service,pvc
确保 Pod 已成功启动并处于 Running
状态:
kubectl -n dev get pods
使用以下命令连接到 PostgreSQL 数据库:
kubectl -n dev exec -it <postgresql-pod-name> -- psql -U postgres
在 PostgreSQL 中验证 pgvector 插件是否已启用:
CREATE EXTENSION vector;
SELECT * FROM pg_extension WHERE extname = 'vector';
根据需要调整 Deployment 的副本数:
kubectl -n dev scale deployment postgresql-deployment --replicas=2
建议定期备份 PostgreSQL 数据。可以使用以下命令进行备份:
kubectl -n dev exec -it <postgresql-pod-name> -- pg_dumpall -U postgres > backup.sql
建议配置 Prometheus 和 Grafana 对 PostgreSQL 进行监控,重点关注以下指标:
kubectl logs
查看 Pod 日志以获取更多信息。CREATE EXTENSION vector;
以下是需要用户根据实际情况替换的内容:
{{ YOUR_POSTGRES_PASSWORD }}
:替换为实际的 PostgreSQL 密码。{{ YOUR_STORAGE_CLASS }}
:替换为实际的存储类名称。串行:业务中需要发请求,必须等请求响应过来了,才可以进行下一步的代码操作
消息传递:服务与服务之间 通过消息发送数据来通信,而不是互相调用
排队:指得是应用程序通过队列来通信
异步处理 A B C三个请求,B C不会影响A请求
解耦服务 避免每个服务之间的冗余联系
流量削峰 避免流量堆积
1:erlang
2:socat
3:RabbitMQ
31 cd /usr/lib/rabbitmq/
32 ls
33 cd bin/
34 ls
35 rabbitmq-plugins enable rabbitmq_management
36 systemctl restart rabbitmq-server
37 systemctl start rabbitmq-server
38 systemctl status rabbitmq-server
15672 web页面
25672 集群接口
5672 接受命令的端口
[root@localhost bin]# netstat -anp|grep beam
tcp 0 0 0.0.0.0:15672 0.0.0.0:* LISTEN 20728/beam.smp
tcp 0 0 0.0.0.0:25672 0.0.0.0:* LISTEN 20728/beam.smp
tcp 0 0 127.0.0.1:59040 127.0.0.1:4369 ESTABLISHED 20728/beam.smp
tcp6 0 0 :::5672 :::* LISTEN 20728/beam.smp
unix 3 [ ] STREAM CONNECTED 43996 20728/beam.smp
unix 3 [ ] STREAM CONNECTED 49040 20728/beam.smp
[root@localhost bin]#
[root@localhost ~]# rabbitmqctl add_user admin admin #添加用户
Adding user "admin" ...
[root@localhost ~]# rabbitmqctl set_user_tags admin administrator #绑定权限
Setting tags for user "admin" to [administrator] ...
[root@localhost ~]# rabbitmqctl change_password admin 123456 #修改密码
Changing password for user "admin" ...
Connections:客户端和RabbitMQ所建立的连接
Channels:通道 传输数据
Exchanges:交换机 将数据转发到队列中
Queues:队列
Consumers:消费者
**Broker:**接收和分发消息的应用,RabbitMQ Server就是 Message Broker
**Virtual host:**出于多租户和安全因素设计的,把 AMQP 的基本组件划分到一个虚拟的分组中,类似于网络中的 namespace 概念。当多个不同的用户使用同一个 RabbitMQ server 提供的服务时,可以划分出多个vhost,每个用户在自己的 vhost 创建 exchange/queue 等
**Connection:**publisher/consumer 和 broker 之间的 TCP 连接
**Channel:**如果每一次访问 RabbitMQ 都建立一个 Connection,在消息量大的时候建立 TCP Connection的开销将是巨大的,效率也较低。Channel 是在 connection 内部建立的逻辑连接,如果应用程序支持多线程,通常每个thread创建单独的 channel 进行通讯,AMQP method 包含了channel id 帮助客户端和message broker 识别 channel,所以 channel 之间是完全隔离的。Channel 作为轻量级的 Connection 极大减少了操作系统建立 TCP connection 的开销
**Exchange:**message 到达 broker 的第一站,根据分发规则,匹配查询表中的 routing key,分发消息到queue 中去。常用的类型有:direct (point-to-point), topic (publish-subscribe) and fanout (multicast)
**Queue:**消息最终被送到这里等待 consumer 取走
**Binding:**exchange 和 queue 之间的虚拟连接,binding 中可以包含 routing key。Binding 信息被保存到 exchange 中的查询表中,用于 message 的分发依据
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.example</groupId>
<artifactId>demo02-mq</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>demo02-mq</name>
<description>demo02-mq</description>
<properties>
<java.version>1.8</java.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<spring-boot.version>2.3.6.RELEASE</spring-boot.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-amqp</artifactId>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-dependencies</artifactId>
<version>${spring-boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<version>${spring-boot.version}</version>
<configuration>
<mainClass>com.example.demo02.mq.Demo02MqApplication</mainClass>
<skip>true</skip>
</configuration>
<executions>
<execution>
<id>repackage</id>
<goals>
<goal>repackage</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
package com.example.demo02.mq.util;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
/**
* @author Allen
* 4/10/2024 7:25 PM
* @version 1.0
* @description: MQ连接工具类
*
*/
public class ConnectionUtils {
//为什么使用静态代码块初始化连接工厂?
//因为连接工厂只需要初始化一次,所以使用静态代码块初始化
private static ConnectionFactory connectionFactory;
static {
// 创建连接工厂
connectionFactory = new ConnectionFactory();
//mq服务主机地址
connectionFactory.setHost("ningbo-3689d402.of-7af93c01.shop");
//连接端口
connectionFactory.setPort(40991);
connectionFactory.setVirtualHost("/my240410");
//设置用户名
connectionFactory.setUsername("allen");
//设置密码
connectionFactory.setPassword("123456");
}
public static Connection getConnection() {
try {
//返回连接 通过工厂获取连接
return connectionFactory.newConnection();
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
}
@SpringBootTest
class Demo02MqApplicationTests {
@Test
void contextLoads() {
Connection connection = ConnectionUtils.getConnection();
System.out.println(connection);
}
}
结果:
amqp://allen@110.42.14.112:40991//my240410
* 操作MQ
* 生产者:发送消息
* 消费者:消费信息
*
* 五大消息模型:
* 1、simple消息模型
* 一个生产者、一个消费者、一个队列
* 2、work消息模型
* 一个生产者、多个消费者、一个队列
* 能者多劳:消费者性能高就可以多消费一些信息
* 3、fanout消息模型
* 一个生产者、一个交换机、多个队列、多个消费者
* 怎么实现一个生产者发送消息给多个消费者?
* 生产者发送消息给交换机,交换机发送消息给队列,队列发送消息给消费者
* fanout:广播的意思,发送消息给所有绑定的队列
* 4、direct消息模型
* 一个生产者、一个交换机、多个队列、多个消费者 (队列绑定交换机时需要指定routingKey)
* 5、topic消息模型
* 一个生产者/多个生产者、一个交换机、多个队列、多个消费者 (队列绑定交换机时使用的routingKey是通配符)
* *:通配一级任意多个字符
* #:通配多级任意多个字符
* 例如:user.*:匹配user.add、user.delete
* user.#:匹配user.add、user.delete、user.add.delete
package com.example.demo02.mq.simple;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import java.io.IOException;
/**
* @author Allen
* 4/10/2024 8:07 PM
* @version 1.0
* @description: 简单模型-生产者
*/
public class SimpleSender {
    /**
     * Simple-model producer: publishes a single message to the default
     * (nameless) exchange, which routes it to the queue named "simple_queue".
     * The simple model needs no explicit exchange declaration — only the queue,
     * because queues buffer data while exchanges never store messages.
     */
    public static void main(String[] args) throws Exception {
        Connection conn = ConnectionUtils.getConnection();
        Channel ch = conn.createChannel();
        // queueDeclare args: name, durable, exclusive, autoDelete, extra arguments
        ch.queueDeclare("simple_queue", false, false, false, null);
        // basicPublish args: exchange ("" = default), routing key (= queue name),
        // message properties, payload bytes
        byte[] payload = "hello rabbitmq".getBytes();
        ch.basicPublish("", "simple_queue", null, payload);
        // Release resources in reverse order of acquisition.
        ch.close();
        conn.close();
    }
}
package com.example.demo02.mq.simple;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/10/2024 8:59 PM
* @version 1.0
* @description: 简单模型-消费者
*/
public class SimpleReciver {
    /** Simple-model consumer: listens on "simple_queue" and manually ACKs each message. */
    public static void main(String[] args) throws Exception {
        // 1. Open a connection
        Connection connection = ConnectionUtils.getConnection();
        // 2. Open a channel
        Channel channel = connection.createChannel();
        // 3. Declare the queue (idempotent — safe if it was already declared)
        // 4. Start listening on it
        channel.queueDeclare("simple_queue", false, false, false, null);
        Consumer consumer = new DefaultConsumer(channel){
            // Message-handling logic; invoked once per delivered message.
            // With autoAck=true, the message would be considered confirmed as
            // soon as this method is called.
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                System.out.println("消费者接收到消息:"+new String(body));
                /*
                channel.basicReject(); // reject a message: drop it or requeue it
                channel.basicNack();   // negative-ack: optionally requeue
                */
                channel.basicAck(envelope.getDeliveryTag(),false); // manual ACK; arg1: delivery tag, arg2: confirm multiple at once
            }
        };
        /* basicConsume args: queue name, autoAck flag, consumer.
           queue name: which queue to consume from
           autoAck: true = the broker treats the message as acknowledged on delivery
                    and deletes it; false = manual confirm, and if the consumer
                    dies, unacked messages return to the queue
           consumer: the callback object
        channel.basicConsume("simple_queue", true,consumer);
           Manual ACK is used here to avoid message loss.
        */
        channel.basicConsume("simple_queue", false,consumer);
    }
}
Unacked : 消费者已经接收、但尚未向 MQ 确认(ACK)的消息,处于未确认状态
当消息被消费并且消费者向 MQ 发送了 ACK 之后,MQ 才会把这条消息从队列中删除
* work模型:
* 多个消费者消费同一个队列中的消息,每个消费者获取到的消息唯一,且只能消费一次
* 作用:提高消息的消费速度,避免消息的堆积
* 默认采用轮询的方式分发消息
* 如果某个消费者处理消息慢,会导致消息堆积
package com.example.demo02.mq.work;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/**
* @author Allen
* 4/10/2024 9:37 PM
* @version 1.0
* @description: work模式发送者
*
* work模型:
* 多个消费者消费同一个队列中的消息,每个消费者获取到的消息唯一,且只能消费一次
* 作用:提高消息的消费速度,避免消息的堆积
* 默认采用轮询的方式分发消息
* 如果某个消费者处理消息慢,会导致消息堆积
*/
public class WorkSender {
    /**
     * Work-model producer: pushes 100 messages into "work.queue" so that
     * several competing consumers can share the load. In the work model each
     * message is delivered to exactly one of the consumers (round-robin by
     * default), which speeds up consumption and avoids backlog.
     */
    public static void main(String[] args) throws Exception {
        Connection conn = ConnectionUtils.getConnection();
        Channel ch = conn.createChannel();
        // queueDeclare args: name, durable, exclusive, autoDelete, extra arguments
        ch.queueDeclare("work.queue", false, false, false, null);
        // Publish 100 messages, slowing the producer down progressively
        // (n * 5 ms between sends) so consumer pacing is observable.
        for (int n = 0; n < 100; n++) {
            String body = "work模式消息" + n;
            TimeUnit.MILLISECONDS.sleep(n * 5);
            // basicPublish args: exchange, routing key (queue name), props, payload
            ch.basicPublish("", "work.queue", null, body.getBytes());
            System.out.println("work模式发送消息:" + body);
        }
        ch.close();
        conn.close();
    }
}
(能者多劳角色)
package com.example.demo02.mq.work;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/10/2024 9:37 PM
* @version 1.0
* @description: work模式消费者1号
*/
public class WorkReciver1 {
    /** Work-model consumer #1 (the fast worker, no prefetch limit). */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the queue
        // args: name, durable, exclusive, autoDelete, extra properties
        channel.queueDeclare("work.queue", false, false, false, null);
        // 4: define the consumer
        Consumer consumer = new DefaultConsumer(channel) {
            // Invoked once per delivered message.
            // args: consumer tag, delivery envelope, message properties, body
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                String msg = new String(body);
                System.out.println("work模式消费者1号接收消息:" + msg);
                channel.basicAck(envelope.getDeliveryTag(), false); // manual ACK
            }
        };
        // basicConsume args: queue name, autoAck (false = manual confirm), consumer
        channel.basicConsume("work.queue", false, consumer);
    }
}
(消费能力差)
package com.example.demo02.mq.work;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/**
* @author Allen
* 4/10/2024 9:37 PM
* @version 1.0
* @description: work模式消费者1号
*/
public class WorkReciver2 {
    /**
     * Work-model consumer #2 — deliberately slow, to demonstrate fair dispatch
     * ("the capable do more"): with basicQos(1), the broker sends this worker
     * the next message only after the previous one was ACKed.
     * (Original header comment mislabeled this class as consumer #1.)
     */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the queue
        // args: name, durable, exclusive, autoDelete, extra properties
        channel.queueDeclare("work.queue", false, false, false, null);
        // Fair dispatch for a slow consumer: fetch at most 1 unacked message;
        // the broker sends the next one only after the current one is ACKed.
        channel.basicQos(1);
        // 4: define the consumer
        Consumer consumer = new DefaultConsumer(channel) {
            // Invoked once per delivered message.
            // args: consumer tag, delivery envelope, message properties, body
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                // Simulate slow processing in consumer #2.
                try {
                    TimeUnit.SECONDS.sleep(1);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                // Business logic: handle the message.
                String msg = new String(body);
                System.out.println("work模式消费者2号接收消息:" + msg);
                channel.basicAck(envelope.getDeliveryTag(), false);
            }
        };
        // basicConsume args: queue name, autoAck flag, consumer.
        // Manual ACK is required here — with basicQos + autoAck the broker
        // would reject the setup with a 406 channel error.
        channel.basicConsume("work.queue", false, consumer);
    }
}
能者多劳
* 广播模型:
* 一个交换机绑定多个队列
* 每个队列都有一个消费者
* 每个消费者消费自己队列中的消息,每个队列的信息是一样的
package com.example.demo02.mq.fanout;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.BuiltinExchangeType;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 8:24 AM
* @version 1.0
* @description: 广播模型发送者
*
* 广播模型:
* 一个交换机绑定多个队列
* 每个队列都有一个消费者
* 每个消费者消费自己队列中的消息,每个队列的信息是一样的
*/
public class FanoutSender {
    /**
     * Fanout-model producer.
     *
     * Fanout model: one exchange bound to several queues; every bound queue
     * receives a copy of each message, and each queue has its own consumer.
     */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the exchange
        // args: exchange name, exchange type (fanout | direct | topic), durable
        /*
        fanout: broadcast — every queue bound to the exchange gets the message
        direct: routing — only queues whose binding key matches exactly
        topic:  wildcard — queues whose binding pattern matches the routing key
        */
        channel.exchangeDeclare("fanout.exchange", BuiltinExchangeType.FANOUT,false);
        // Exchanges never store messages, they only forward; if no queue is
        // bound to the exchange the message is lost.
        // 4: publish to the exchange — each consumer declares its own queue and
        // binds it to this exchange.
        String msg = "fanout message";
        channel.basicPublish("fanout.exchange", "", null, msg.getBytes());
        // 5: close the channel
        channel.close();
        // 6: close the connection
        connection.close();
    }
}
package com.example.demo02.mq.fanout;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 8:55 AM
* @version 1.0
* @description: 广播模型接收者
*/
public class FanoutReceiver1 {
    /** Fanout-model consumer #1: owns "fanout.queue1". */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the exchange here too — if this consumer starts before the
        // producer, the exchange would not exist yet and binding would fail.
        // args: exchange name, exchange type, durable
        channel.exchangeDeclare("fanout.exchange", BuiltinExchangeType.FANOUT,false);
        // 4: declare this consumer's own queue
        // args: name, durable, exclusive, autoDelete, extra arguments
        channel.queueDeclare("fanout.queue1", false, false, false, null);
        // 5: bind the queue to the exchange (fanout ignores the routing key)
        channel.queueBind("fanout.queue1", "fanout.exchange", "");
        // 6: consume
        Consumer consumer = new DefaultConsumer(channel){
            @Override
            // args: consumer tag, delivery envelope, message properties, body
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                // Handle the message
                System.out.println("Fanout1接收到的消息是:" + new String(body));
                // Manual ACK
                channel.basicAck(envelope.getDeliveryTag(),false);
            }
        };
        channel.basicConsume("fanout.queue1",false,consumer);
    }
}
package com.example.demo02.mq.fanout;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 8:55 AM
* @version 1.0
* @description: 广播模型接收者
*/
public class FanoutReceiver2 {
    /** Fanout-model consumer #2: owns "fanout.queue2", receives the same broadcast copies as #1. */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the exchange here too — if this consumer starts before the
        // producer, the exchange would not exist yet and binding would fail.
        channel.exchangeDeclare("fanout.exchange", BuiltinExchangeType.FANOUT,false);
        // 4: declare this consumer's own queue
        // args: name, durable, exclusive, autoDelete, extra arguments
        channel.queueDeclare("fanout.queue2", false, false, false, null);
        // 5: bind the queue to the exchange (fanout ignores the routing key)
        channel.queueBind("fanout.queue2", "fanout.exchange", "");
        // 6: consume
        Consumer consumer = new DefaultConsumer(channel){
            @Override
            // args: consumer tag, delivery envelope, message properties, body
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                // Handle the message
                System.out.println("Fanout2接收到的消息是:" + new String(body));
                // Manual ACK
                channel.basicAck(envelope.getDeliveryTag(),false);
            }
        };
        channel.basicConsume("fanout.queue2",false,consumer);
    }
}
当消息很多的时候,需要指定的路由键也会很多,究极复杂。
###### 生产者
package com.example.demo02.mq.direct;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.BuiltinExchangeType;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 9:30 AM
* @version 1.0
* @description: 路由模型发送者
*
* 路由模型:
* 一个交换机可以绑定多个队列
* 生产者给交换机发送消息时,需要指定消息的路由键
* 消费者绑定队列到交换机时,需要指定所需要消费的信息的路由键
* 交换机会根据消息的路由键将消息转发到对应的队列
* 缺点:
* 当消息很多的时候,需要指定的路由键也会很多,究极复杂。
*/
public class DirectSender {
    /**
     * Direct-model producer.
     *
     * Direct model: one exchange bound to multiple queues, where each binding
     * carries an exact routing key. The exchange forwards each message only to
     * the queue(s) whose binding key matches the message's routing key.
     * Drawback: with many message kinds the number of routing keys explodes.
     */
    public static void main(String[] args) throws Exception {
        Connection conn = ConnectionUtils.getConnection();
        Channel ch = conn.createChannel();
        // exchangeDeclare args: name, type, durable
        ch.exchangeDeclare("direct.exchange", BuiltinExchangeType.DIRECT, false);
        byte[] savedOrder = "{To DirectReceiver1: orderId:1001}".getBytes();
        byte[] updatedOrder = "{To DirectReceiver2: orderId:1002}".getBytes();
        // basicPublish args: exchange, routing key (must match a consumer
        // binding exactly), extra properties, payload
        ch.basicPublish("direct.exchange", "order.save", null, savedOrder);
        ch.basicPublish("direct.exchange", "order.update", null, updatedOrder);
        ch.close();
        conn.close();
    }
}
package com.example.demo02.mq.direct;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 9:44 AM
* @version 1.0
* @description: 路由模型接收者1
*/
public class DirectReceiver1 {
    /** Direct-model consumer #1: receives "order.save" messages on "direct.queue1". */
    public static void main(String[] args) throws Exception {
        Connection connection = ConnectionUtils.getConnection();
        Channel channel = connection.createChannel();
        // Declare exchange and queue defensively in case this process starts first.
        channel.exchangeDeclare("direct.exchange", BuiltinExchangeType.DIRECT, false);
        channel.queueDeclare("direct.queue1", false, false, false, null);
        // Bind with the exact routing key this consumer cares about.
        channel.queueBind("direct.queue1","direct.exchange","order.save");
        Consumer consumer = new DefaultConsumer(channel){
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                System.out.println("DirectReceiver1接收到的新增订单消息是:" + new String(body));
                channel.basicAck(envelope.getDeliveryTag(),false); // manual ACK
            }
        };
        channel.basicConsume("direct.queue1",false,consumer);
    }
}
package com.example.demo02.mq.direct;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 9:44 AM
* @version 1.0
* @description: 路由模型接收者2
*/
public class DirectReceiver2 {
    /** Direct-model consumer #2: receives "order.update" messages on "direct.queue2". */
    public static void main(String[] args) throws Exception {
        Connection connection = ConnectionUtils.getConnection();
        Channel channel = connection.createChannel();
        // Declare exchange and queue defensively in case this process starts first.
        channel.exchangeDeclare("direct.exchange", BuiltinExchangeType.DIRECT, false);
        channel.queueDeclare("direct.queue2", false, false, false, null);
        // Bind with the exact routing key this consumer cares about.
        channel.queueBind("direct.queue2","direct.exchange","order.update");
        Consumer consumer = new DefaultConsumer(channel){
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                System.out.println("DirectReceiver2接收到的修改订单消息:" + new String(body));
                channel.basicAck(envelope.getDeliveryTag(),false); // manual ACK
            }
        };
        channel.basicConsume("direct.queue2",false,consumer);
    }
}
* 通配符模型
* 生产者必须指定完整且准确的路由key
* 消费者可以使用通配符
* *:可以替代一级的任意字符 add.* ==> add.user add.goods
* #:可以替代多级的任意字符 add.# ==> add.user.name add.user.name.firstName
package com.example.demo02.mq.topic;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.BuiltinExchangeType;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 10:15 AM
* @version 1.0
* @description: 通配符模型发送者
* 通配符模型
* 生产者必须指定完整且准确的路由key
* 消费者可以使用通配符
* *:可以替代一级的任意字符 add.* ==> add.user add.goods
* #:可以替代多级的任意字符 add.# ==> add.user.name add.user.name.firstName
*/
public class TopicSender {
    /**
     * Topic-model producer.
     *
     * Topic model: producers must publish with a complete, exact routing key
     * (no wildcards); consumers may bind with wildcard patterns:
     *   * matches exactly one dot-separated segment  (add.* -> add.user)
     *   # matches zero or more segments              (add.# -> add.user.name)
     */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the topic exchange (name, type, durable)
        channel.exchangeDeclare("topic.exchange", BuiltinExchangeType.TOPIC,false);
        // 4: publish the demo messages. Driving the publishes from a
        // {routingKey, payload} table replaces the ten copy-pasted
        // variable+publish pairs while sending byte-identical messages with
        // identical routing keys, in the same order.
        String[][] messages = {
            {"goods.add",        "商品新增了,Topic模型,routing key 为 goods.add"},
            {"goods.update",     "商品修改了,Topic模型,routing key 为 goods.update"},
            {"goods.delete",     "商品删除了,Topic模型,routing key 为 goods.delete"},
            {"user.add",         "用户新增了,Topic模型,routing key 为 user.add"},
            {"user.update",      "用户修改了,Topic模型,routing key 为 user.update"},
            {"user.delete",      "用户删除了,Topic模型,routing key 为 user.delete"},
            {"user.add.name",    "添加了用户名字,Topic模型,routing key 为 user.add.name"},
            {"user.add.age",     "添加了用户年龄,Topic模型,routing key 为 user.add.age"},
            {"user.update.name", "修改了用户名字,Topic模型,routing key 为 user.update.name"},
            {"user.update.age",  "修改了用户年龄,Topic模型,routing key 为 user.update.age"},
        };
        for (String[] entry : messages) {
            channel.basicPublish("topic.exchange", entry[0], null, entry[1].getBytes());
        }
        // 5: close channel and connection
        channel.close();
        connection.close();
    }
}
package com.example.demo02.mq.topic;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 10:22 AM
* @version 1.0
*
* @description: 通配符模型接收者1
*/
public class TopicReceiver1 {
    /** Topic-model consumer #1: the "goods" module, bound with pattern "goods.*". */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the exchange (also declared here in case this starts first)
        channel.exchangeDeclare("topic.exchange", BuiltinExchangeType.TOPIC,false);
        // 4: declare this consumer's queue
        channel.queueDeclare("topic.queue1", false, false, false, null);
        // 5: bind with a wildcard pattern — * matches one segment, # matches many;
        // "goods.*" receives goods.add / goods.update / goods.delete but NOT goods.a.b
        channel.queueBind("topic.queue1", "topic.exchange", "goods.*");
        // 6: consume
        Consumer consumer = new DefaultConsumer(channel){
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                System.out.println("商品模块接收到的消息是:" + new String(body));
                channel.basicAck(envelope.getDeliveryTag(),false); // manual ACK
            }
        };
        channel.basicConsume("topic.queue1",false,consumer);
    }
}
package com.example.demo02.mq.topic;
import com.example.demo02.mq.util.ConnectionUtils;
import com.rabbitmq.client.*;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 10:22 AM
* @version 1.0
*
* @description: 通配符模型接收者2
*/
public class TopicReceiver2 {
    /** Topic-model consumer #2: the "user" module, bound with pattern "user.#". */
    public static void main(String[] args) throws Exception {
        // 1: open the connection
        Connection connection = ConnectionUtils.getConnection();
        // 2: open a channel
        Channel channel = connection.createChannel();
        // 3: declare the exchange (also declared here in case this starts first)
        channel.exchangeDeclare("topic.exchange", BuiltinExchangeType.TOPIC,false);
        // 4: declare this consumer's queue
        channel.queueDeclare("topic.queue2", false, false, false, null);
        // 5: bind with "user.#" — # spans multiple segments, so this matches
        // user.add as well as user.add.name, user.update.age, etc.
        channel.queueBind("topic.queue2", "topic.exchange", "user.#");
        // 6: consume
        Consumer consumer = new DefaultConsumer(channel){
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                System.out.println("用户模块接收到的消息是:" + new String(body));
                channel.basicAck(envelope.getDeliveryTag(),false); // manual ACK
            }
        };
        channel.basicConsume("topic.queue2",false,consumer);
    }
}
会造成的问题:
生产者发送消息失败导致丢失:
1:没有发送到mq的交换机(交换机不存在、网络通信失败)
2:交换机没有绑定队列(交换机会丢弃信息)
解决:生产者确认回调:
rabbitTemplate可以设置 生产者确认回调
1、消息是否到达交换机回调
2、消息没有到达队列的回调
mq宕机导致丢失:
默认mq 交换机队列 消息没有持久化,mq非法关闭的时候,数据会丢失
声明队列交换机发送信息时都配置持久化
消费者消费消息导致丢失 手动ACK
交换机
队列
都需要持久化
使用了持久化的交换机和消息,宕机之后不会被清除
对交换机发送消息,需要指定持久化数据类型
生产者
// 参数1:交换机名称 参数2:交换机类型 参数3:是否持久化
channel.exchangeDeclare("topic.exchange", BuiltinExchangeType.TOPIC,true);
//参数1:交换机名称 参数2:路由key 参数3:消息持久化的数据类型配置 参数4:消息内容
channel.basicPublish("topic.exchange","goods.add", MessageProperties.PERSISTENT_TEXT_PLAIN,msg1.getBytes());
消费者
// 3:声明交换机
channel.exchangeDeclare("topic.exchange", BuiltinExchangeType.TOPIC,true);
使用了持久化的队列 可以保存住队列里面的消息
pom文件
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.allen</groupId>
<artifactId>demo03-order-provider</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>demo03-order-provider</name>
<description>demo03-order-provider</description>
<properties>
<java.version>1.8</java.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<spring-boot.version>2.3.6.RELEASE</spring-boot.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-amqp</artifactId>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-dependencies</artifactId>
<version>${spring-boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<version>${spring-boot.version}</version>
<configuration>
<mainClass>com.allen.demo03.order.Demo03OrderProviderApplication</mainClass>
<skip>true</skip>
</configuration>
<executions>
<execution>
<id>repackage</id>
<goals>
<goal>repackage</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
application.yaml文件
#端口号
server:
port: 8110
spring:
application:
name: order-service
#虚拟机重启之后,rabbitmq服务需要重启 systemctl restart rabbitmq-server
#rabbitmq配置
rabbitmq:
host: ningbo-3689d402.of-7af93c01.shop
port: 40991
username: admin
password: admin
virtual-host: /my240410
#配置mq生产者确认回调 默认是false
publisher-returns: true
#correlated: 使用额外的线程池的线程来调用回调方法
#simple: 使用发布者的线程池的线程来调用回调方法
publisher-confirm-type: correlated
死信交换机
死信队列
死信路由key
* 配置死信交换机、死信队列、绑定关系
* 解释死信队列:
* 1、死信队列:当消息被拒绝、消息过期、队列达到最大长度时,消息会进入死信队列
* 2、死信交换机:死信队列绑定的交换机
* 3、死信路由key:消息进入死信队列时,会携带一个路由key
* 4、死信队列绑定死信交换机
* 5、业务队列绑定死信交换机
* 6、业务队列绑定业务交换机
@Test
void contextLoads() {
rabbitTemplate.convertAndSend("business.exchange",
"stock.bussiness",
"我是测试死信队列,所发送到business的消息!!!");
}
配置发送消息到交换机、队列的回调
也可以配置交换机和队列
package com.allen.demo03.order.config;
import org.springframework.amqp.core.*;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import javax.annotation.Resource;
/**
* @author Allen
* 4/11/2024 2:08 PM
* @version 1.0
*/
@Configuration
public class RabbitmqConfig implements InitializingBean {

    @Resource
    RabbitTemplate rabbitTemplate;

    /**
     * Spring bean-lifecycle hook: runs after this bean's properties have been
     * injected. Registers the two producer-side reliability callbacks on the
     * shared RabbitTemplate (requires publisher-confirm-type / publisher-returns
     * to be enabled in application.yaml).
     */
    @Override
    public void afterPropertiesSet() throws Exception {
        // Confirm callback: did the message reach the exchange?
        rabbitTemplate.setConfirmCallback((correlationData, ack, cause) -> {
            if(!ack){
                System.out.println("消息没有到达交换机");
                System.out.println("消息是否到达交换机:"+ack);
                System.out.println(ack?"":"未到达原因:"+cause);
                System.out.println("消息:"+correlationData);
            }
        });
        // Return callback: fired when the message reached the exchange but
        // could not be routed to any queue.
        rabbitTemplate.setReturnCallback((Message message, int replyCode, String replyText, String exchange, String routingKey)->{
            System.out.println("消息没有到达队列");
            System.out.println("消息:"+message);
            System.out.println("应答码:"+replyCode);
            System.out.println("原因:"+replyText);
            System.out.println("交换机:"+exchange);
            System.out.println("路由键:"+routingKey);
        });
    }

    /** Topic exchange "pay.exchange": durable, declaration exceptions ignored (avoids startup crash on re-declare mismatch). */
    @Bean
    public Exchange payExchange(){
        return ExchangeBuilder.topicExchange("pay.exchange")
                .ignoreDeclarationExceptions()
                .durable(true)
                .build();
    }

    /** Durable queue "pay.queue". */
    @Bean
    public Queue payQueue(){
        return QueueBuilder.durable("pay.queue")
                .build();
    }

    /**
     * Binds pay.queue to pay.exchange under routing pattern "pay.*".
     * Fix: use the injected bean parameters instead of calling the sibling
     * {@code @Bean} methods directly — the original ignored both parameters
     * and relied on CGLIB proxying of the @Configuration class, which obscures
     * the dependency and breaks under proxyBeanMethods=false.
     */
    @Bean
    public Binding payBinding(Queue payQueue, Exchange payExchange){
        return BindingBuilder.bind(payQueue)   // bind the queue
                .to(payExchange)               // to the exchange
                .with("pay.*")                 // under this routing pattern
                .noargs();                     // no extra arguments
    }
}
package com.example.demo04.stock.consumer.listener;
import com.rabbitmq.client.Channel;
import org.springframework.amqp.core.Message;
import org.springframework.amqp.rabbit.annotation.Exchange;
import org.springframework.amqp.rabbit.annotation.Queue;
import org.springframework.amqp.rabbit.annotation.QueueBinding;
import org.springframework.amqp.rabbit.annotation.RabbitListener;
import org.springframework.stereotype.Component;
import java.io.IOException;
/**
* @author Allen
* 4/11/2024 9:30 PM
* @version 1.0
* @description: 消费者:库存监听器,监听库存消息,进行消费
*/
@Component
public class StockListener {
    // Listens for "order paid" messages.
    // A @RabbitListener-annotated method IS a consumer: it monitors the given
    // queue and is invoked automatically whenever a message arrives.
    /* @RabbitListener(
            // If the queue already exists elsewhere, binding by name is enough:
            queues = {"pay.queue"}
    )
    public void test(String orderToken){
        System.out.println("stock服务接收到订单支付成功的消息:"+orderToken);
    }*/
    /*
       Spring Boot + MQ default confirmation behavior:
       when the listener throws, the message is requeued and redelivered —
       a message that always fails therefore loops forever.
    */
    // The stock service declares its own queue and binds it to the order
    // service's exchange.
    @RabbitListener(
            // If the queue already exists elsewhere, binding by name would suffice.
            bindings = @QueueBinding(
                    value = @Queue(name = "stock.pay.queue", durable = "true"), // create the queue
                    // @Exchange: if the exchange already exists, this declaration
                    // must match the existing one exactly.
                    exchange = @Exchange(name = "pay.exchange", type = "topic",ignoreDeclarationExceptions = "true"), // bind the exchange
                    key = {"pay.#"} // binding routing-key pattern
            )
    )
    // args: message payload, raw message object, AMQP channel
    public void paySuccess(String orderToken, Message message, Channel channel) throws IOException {
        try {
            // int a = 1/0;   // uncomment to simulate a processing failure
            System.out.println("stock服务接收到订单支付成功的消息:"+orderToken);
            // Manual ACK on success.
            channel.basicAck(message.getMessageProperties().getDeliveryTag(),false);
        } catch (Exception e) {
            System.out.println("手动ACK,出现异常");
            Boolean flag = message.getMessageProperties().isRedelivered(); // was this already requeued once?
            if (flag) {
                System.out.println("消息已经重回队列且再次消费出现异常被捕获,丢弃信息");
                // Second failure: reject without requeue (drops the message).
                channel.basicReject(message.getMessageProperties().getDeliveryTag(),false);
                // Instead of dropping, the queue could be bound to a dead-letter
                // exchange so rejected messages are forwarded there.
            }else {
                System.out.println("消息第一次消费出现异常被捕获,重新回到队列中");
                // First failure: nack with requeue=true for one retry.
                channel.basicNack(message.getMessageProperties().getDeliveryTag(),false,true);
            }
        }
    }
    /*
     * Manual ACK pitfalls:
     *   A business exception can leave a message permanently un-ACKed (stuck in
     *   "unacked" until the consumer process stops). A transient network blip can
     *   make one delivery fail even though a later attempt would succeed, yet the
     *   message does not return to the queue by itself.
     * Strategy used above:
     *   1. catch the broadest checked exception around the manual-ACK path;
     *   2. on the first failure, requeue the message (unacked -> ready) for a retry;
     *   3. on the second failure, reject it — if the queue is bound to a
     *      dead-letter exchange the message lands there, otherwise it is dropped.
     *
     * If a message is unimportant, losing it may be acceptable.
     * For important messages, reliability needs all three:
     *   1. producer confirms;
     *   2. durable exchange / queue / persistent messages;
     *   3. consumer manual ACK — and since a twice-failed message is dropped,
     *      bind the queue to a dead-letter queue.
     * A dead-letter queue is just an ordinary queue, usually with its own
     * exchange and a dedicated routing key; the business queue references it
     * through extra queue arguments.
     *
     * Queue arguments (visible when creating a queue in the management console):
     *   x-max-length        — maximum number of messages the queue holds
     *   x-max-length-bytes  — total byte limit for all messages
     *     (whichever threshold is hit first wins)
     *   x-dead-letter-exchange     — exchange that handles dropped messages
     *   x-dead-letter-routing-key  — routing key used when forwarding them
     *   x-message-ttl       — per-message expiry in milliseconds; expired
     *                         messages are dropped (dead-lettered)
     * A queue drops messages when:
     *   1. code explicitly rejects them without requeue;
     *   2. the queue is full;
     *   3. the message TTL expires.
     * */
    // Dead-letter demo:
    // consumer of the business queue (see StockMqConfig for the declarations)
    @RabbitListener(queues = "business.queue")
    public void businessListener(Message message, Channel channel , String msg) throws IOException {
        try {
            int a =1 / 0; // deliberate failure so the message ends up dead-lettered
            // Business logic for the message:
            System.out.println("测试死信队列:所接受到的business队列的消息:"+msg);
            // Manual ACK — arg1: delivery tag, arg2: confirm multiple at once
            channel.basicAck(message.getMessageProperties().getDeliveryTag(),false);
        } catch (Exception e) {
            if(message.getMessageProperties().isRedelivered()){
                // Second failure: reject without requeue -> goes to the dead-letter exchange.
                channel.basicReject(message.getMessageProperties().getDeliveryTag(),false);
            }else {
                // First failure: requeue for one retry.
                // args: delivery tag, multiple, requeue
                channel.basicNack(message.getMessageProperties().getDeliveryTag(),false,true);
            }
        }
    }
}
package com.example.demo04.stock.consumer.config;
import com.rabbitmq.client.Channel;
import org.springframework.amqp.core.*;
import org.springframework.amqp.rabbit.annotation.QueueBinding;
import org.springframework.amqp.rabbit.annotation.RabbitListener;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.io.IOException;
import java.util.Map;
/**
* @author Allen
* 4/12/2024 11:27 AM
* @version 1.0
*
* @description: 库存服务的MQ配置类
* 配置死信交换机、死信队列、绑定关系
* 解释死信队列:
* 1、死信队列:当消息被拒绝、消息过期、队列达到最大长度时,消息会进入死信队列
* 2、死信交换机:死信队列绑定的交换机
* 3、死信路由key:消息进入死信队列时,会携带一个路由key
* 4、死信队列绑定死信交换机
* 5、业务队列绑定死信交换机
* 6、业务队列绑定业务交换机
*/
@Configuration
public class StockMqConfig {
    // ---------------- dead-letter side ----------------
    // 1. Dead-letter exchange "dead.exchange" (topic; declaration errors ignored).
    @Bean
    public Exchange deadExchange(){
        return ExchangeBuilder.topicExchange("dead.exchange")
                .ignoreDeclarationExceptions()
                .build();
    }
    // 2. Durable dead-letter queue "dead.queue".
    @Bean
    public Queue deadQueue(){
        return QueueBuilder.durable("dead.queue")
                .build();
    }
    // 3. Bind the dead-letter queue to the dead-letter exchange under key "dead.msg".
    @Bean
    public Binding deadBinding(Exchange deadExchange, Queue deadQueue){
        return BindingBuilder.bind(deadQueue)
                .to(deadExchange)
                .with("dead.msg")
                .noargs();
    }
    // ---------------- business side ----------------
    // 1. Business exchange "business.exchange" (topic).
    @Bean
    public Exchange businessExchange(){
        return ExchangeBuilder.topicExchange("business.exchange")
                .ignoreDeclarationExceptions()
                .build();
    }
    // 2. Business queue, wired to the dead-letter exchange: whenever
    // "business.queue" drops a message, the broker republishes it to
    // "dead.exchange" with routing key "dead.msg".
    @Bean
    public Queue businessQueue(){
        return QueueBuilder.durable("business.queue")
                .deadLetterExchange("dead.exchange")
                .deadLetterRoutingKey("dead.msg")
                .build();
    }
    // 3. Bind the business queue to the business exchange.
    // NOTE(review): the binding key "stock.bussiness" carries a spelling typo
    // ("bussiness"); it works, but any producer must use the same spelling.
    @Bean
    public Binding businessBinding(Exchange businessExchange, Queue businessQueue){
        return BindingBuilder.bind(businessQueue)
                .to(businessExchange)
                .with("stock.bussiness")
                .noargs();
    }
}
重点:交换机根据路由key来转发信息
@Test
void testDelayQueue() {
rabbitTemplate.convertAndSend("buss2.Exchange",
"buss2.msg",
"我是测试延迟队列,所发送到buss2的消息!!!");
}
//延迟队列的消费者 监听器
@RabbitListener(queues = "delay.Queue")
public void delayListener(Message message,Channel channel,String msg) throws IOException {
try {
System.out.println("延迟队列的消费者接收到的消息:"+msg);
channel.basicAck(message.getMessageProperties().getDeliveryTag(),false);
} catch (Exception e) {
if(message.getMessageProperties().isRedelivered()){
channel.basicReject(message.getMessageProperties().getDeliveryTag(),false);
}else {
channel.basicNack(message.getMessageProperties().getDeliveryTag(),false,true);
}
}
}
package com.example.demo04.stock.consumer.config;
import org.springframework.amqp.core.*;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* @author Allen
* 4/12/2024 2:34 PM
* @version 1.0
* @description: 延迟队列配置
*/
@Configuration
public class DelayMqConfig {
    // Delay queue built from the dead-letter mechanism:
    // messages sit in the TTL'd business queue, expire, and are dead-lettered
    // into the "delay" exchange/queue where the real consumer picks them up.
    // 1: the delay (dead-letter) exchange
    @Bean
    public Exchange delayExchange() {
        return ExchangeBuilder.topicExchange("delay.Exchange")
                .ignoreDeclarationExceptions()
                .durable(true)
                .build();
    }
    // 2: the delay (dead-letter) queue — this is what the consumer listens on
    @Bean
    public Queue delayQueue() {
        return QueueBuilder
                .durable("delay.Queue")
                // (would chain its own dead-letter config here if needed)
                // .deadLetterExchange("dead.exchange")
                // .deadLetterRoutingKey("dead.msg")
                .build();
    }
    // Bind the delay queue to the delay exchange under key "delay.msg".
    @Bean
    public Binding delayBinding(Exchange delayExchange,Queue delayQueue) {
        return BindingBuilder.bind(delayQueue)
                .to(delayExchange)
                .with("delay.msg")
                .noargs();
    }
    // Business exchange: producers publish here with key "buss2.msg".
    @Bean
    public Exchange buss2Exchange() {
        return ExchangeBuilder.topicExchange("buss2.Exchange")
                .ignoreDeclarationExceptions()
                .durable(true).
                build();
    }
    // Business queue with a TTL and no consumer of its own: expired messages
    // are forwarded to delay.Exchange / delay.msg — i.e. delivered after the delay.
    @Bean
    public Queue buss2Queue() {
        return QueueBuilder.durable("buss2.Queue")
                // Use the dead-letter exchange as a "delay exchange".
                .deadLetterExchange("delay.Exchange")
                .deadLetterRoutingKey("delay.msg")
                // Per-queue TTL in milliseconds (here 1 minute); on expiry the
                // message is dead-lettered into the delay queue.
                .ttl(1*60*1000)
                .build();
    }
    // Bind the business queue to the business exchange under key "buss2.msg".
    @Bean
    public Binding buss2Binding(Exchange buss2Exchange,Queue buss2Queue) {
        return BindingBuilder.bind(buss2Queue)
                .to(buss2Exchange)
                .with("buss2.msg")
                .noargs();
    }
}
@Test
void testParam() {
Map map = new HashMap<>();
map.put("orderToken", "123456123");
map.put("userId", "1001");
map.put("amount", 100D);
rabbitTemplate.convertAndSend("pay.exchange",
"param.pay",map
);
}
//接收复杂类型的消息
// 复杂类型会通过JAVA的序列化机制进行序列化,然后再进行传输到队列中
@RabbitListener(bindings = {
@QueueBinding(
value = @org.springframework.amqp.rabbit.annotation.Queue(value = "param.queue", durable = "true"),
exchange = @org.springframework.amqp.rabbit.annotation.Exchange(name = "pay.exchange", type = ExchangeTypes.TOPIC),
key = {"param.pay"}
)
})
public void paramsListener(Message message , Channel channel, Map map) throws IOException {
System.out.println("接收到的Map类型消息:" + map);
channel.basicAck(message.getMessageProperties().getDeliveryTag(), false);
}
作用:异步、解耦合、削峰填谷
百分百避免消息丢失,项目性能非常差
(本地消息表:发送消息时,同时将消息作为日志存到本地消息表中 状态设置为 消息发送中、消息成功到达队列、交换机的回调方法中可以设置消息表中消息的状态,最后通过定时任务查找状态为发送失败的信息 重新发送)
一般业务场景:
1:生产者确认
2:持久化
3:消费者确认
手动ack
重新投递
丢弃消息到死信队列(人工干预)
幂等性
因为生产者可能重新发送同一个消息,消费者消费异常时也会重新让消息归队
如果网络通信失败:生产者消费者已经发送,或者,重新让消息归队,或者,消费消息成功但跟mq交互时通信失败,都会导致消息以后再次被消费
和防止表单重复提交类似:
1、生产者发送消息时 可以携带一个token(唯一字符串 可以是一个id、也可以是自己生成的数据)
消费者消费时 验证token在本地数据库表中是否已存在/redis的set中是否已存在
如果已存在 表示消息已经被消费过 本次不处理
如果不存在 将token存到本地数据库表中/redis的string并设置过期时间
2、同一个队列中每个消息都有自己的唯一id
一个队列 一个消费者
消费者不足
1、增加部署消费者服务实例
2、开启能者多劳
一个消费者可以配置一次获取多个消息
开启多线程消费消息,提高并发能力
#能者多劳+多线程
#一次性获取消息的数量
prefetch: 3
#允许并发处理的线程数
concurrency: 3
#最大并发处理的线程数
max-concurrency: 3
apiVersion:
指的是Api的版本。
kind:
资源的类型。
metadata:
资源的元数据。比如资源的名称,标签,名称空间,注解等信息。
spec:
用户期望资源的运行状态。
status:
资源实际的运行状态,由K8S集群内部维护。
编写yaml文件:
# API version of the resource
apiVersion: v1
# Resource type
kind: Pod
# Resource metadata
metadata:
  # Pod name
  name: web
# Desired state of the resource
spec:
  # Container definitions
  containers:
    # Container name
  - name: nginx
    # Container image (pulled from the private Harbor registry)
    image: harbor.allen.com/k8s/nginx:1.23.4-alpine
创建Pod:
kubectl create -f 01-nginx.yaml
如果Pod存在,更新资源;如果不存在,创建资源
kubectl apply -f 01-nginx.yaml
查看资源:
kubectl get pods -o wide
相关字段说明:
NAME
代表的是资源的名称。
READY
代表资源是否就绪。比如 0/1 ,表示一个Pod内有一个容器,而且这个容器还未运行成功。
STATUS
代表容器的运行状态。
RESTARTS
代表Pod重启次数,即容器被创建的次数。
AGE
代表Pod资源运行的时间。
IP
代表Pod的IP地址。
NODE
代表Pod被调度到哪个节点。
其他:
"NOMINATED NODE"和"READINESS GATES"两列暂时先忽略
删除资源:
kubectl delete -f 01-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  name: allen-nginx-tomcat
# Multi-container Pod: nginx and tomcat run side by side and share the
# Pod's network namespace.
spec:
  containers:
  - name: nginx
    image: nginx:1.23.4-alpine
  - name: tomcat
    image: tomcat:jre8-alpine
alpine:轻量级的 Linux 发行版 ⚠ 使用此镜像启动的容器,会失败,因为容器中没有持久运行的进程
涉及到知识点:
hostNetwork: true 使用宿主机网络,相当于"docker run --network host"
stdin: true
args: ["tail","-f","/etc/hosts"]
command: ["sleep","15"]
command:
- "tail"
args:
- "-f"
- "/etc/hosts"
apiVersion: v1
kind: Pod
metadata:
  name: allen-nginx-alpine
spec:
  # Use the host network, equivalent to "docker run --network host"
  hostNetwork: true
  containers:
  - name: nginx
    image: nginx:1.23.4-alpine
  - name: linux
    image: alpine
    # Allocate a stdin for the container (default: false) to keep it running
    # stdin: true
    # args overrides the image's CMD instruction
    # args: ["tail","-f","/etc/hosts"]
    # command overrides the image's ENTRYPOINT instruction
    # command: ["sleep","15"]
    # command and args can also be combined, mirroring ENTRYPOINT + CMD in a Dockerfile
    command:
    - "tail"
    args:
    - "-f"
    - "/etc/hosts"
apiVersion: v1
kind: Pod
metadata:
  name: games
spec:
  hostNetwork: true
  # Pin the Pod to a specific node; the node name must match the name
  # registered in the cluster (etcd).
  nodeName: k8s-node01
  containers:
  - name: game
    image: harbor.allen.com/games/games:v0.1
apiVersion: v1
kind: Pod
metadata:
  name: allen-web-restartpolicy-always
spec:
  nodeName: k8s-node01
  # Always restart the container whenever it exits, regardless of exit code.
  restartPolicy: Always
  containers:
  - name: nginx
    image: harbor.allen.com/k8s/web:v0.1
    imagePullPolicy: Always
    # "sleep 10" makes the container exit after 10s so the restart
    # behaviour can be observed.
    command:
    - "sleep"
    - "10"
---
apiVersion: v1
kind: Pod
metadata:
  name: allen-web-restartpolicy-onfailure
spec:
  nodeName: k8s-node01
  # Restart the container only on abnormal (non-zero) exit; a clean exit
  # is not restarted.
  restartPolicy: OnFailure
  containers:
  - name: nginx
    image: harbor.allen.com/k8s/web:v0.1
    imagePullPolicy: Always
    command:
    - "sleep"
    - "10"
---
apiVersion: v1
kind: Pod
metadata:
  name: allen-web-restartpolicy-never
spec:
  nodeName: k8s-node01
  # Never restart the container, regardless of how it exits.
  restartPolicy: Never
  containers:
  - name: nginx
    image: harbor.allen.com/k8s/web:v0.1
    imagePullPolicy: Always
    command:
    - "sleep"
    - "10"
(1)将Pod容器的文件拷贝到宿主机
kubectl cp games:/start.sh /tmp/start.sh
(2)连接到Pod的容器(必须在容器是run的状态)
kubectl exec -it games -- sh
(3)查看某个Pod的日志。
kubectl logs -f games
apiVersion: v1
kind: Pod
metadata:
  name: allen-volume-emptydir
spec:
  # Volume definitions
  volumes:
    # Volume name
  - name: data01
    # emptyDir volume: created with the Pod and deleted together with it
    emptyDir: {}
  containers:
  - name: web
    image: harbor.allen.com/k8s/nginx:1.23.4-alpine
    # Mount points
    volumeMounts:
      # Name of the volume to mount
    - name: data01
      # Mount path inside the container
      mountPath: /usr/share/nginx/html
  - name: linux
    image: harbor.allen.com/k8s/alpine:v1
    stdin: true
    # Every container in the Pod shares this same volume
    volumeMounts:
    - name: data01
      mountPath: /allen
#本地存储卷所在的位置 [root@k8s-node01 ~]# ll /var/lib/kubelet/pods/a14858f1-be81-4f8b-a526-e683e629da57/volumes/kubernetes.io~empty-dir/data01/ total 4 -rw-r--r-- 1 root root 26 Jul 27 02:45 index.html #共享存储卷data01 [root@k8s-master01 /server/yaml]# kubectl exec allen-volume-emptydir -c linux -- ls -l /allen/ total 4 -rw-r--r-- 1 root root 42 Jul 26 19:03 index.html
Pod被删除,卷也被删除,因为会将/var/lib/kubelet/pods/a14858f1-be81-4f8b-a526-e683e629da57/ 这个目录删除 ------>(emptyDir类型的存储卷)
apiVersion: v1
kind: Pod
metadata:
  name: allen-volume-hostpath01
spec:
  nodeName: k8s-node01
  # Volume definitions
  volumes:
    # Volume name
  - name: web-data
    # hostPath volume: data lives in a directory on the host node
    hostPath:
      # Host directory backing the volume
      path: /hostpath-data
  containers:
  - name: web
    image: harbor.allen.com/k8s/nginx:1.23.4-alpine
    # Mount points
    volumeMounts:
      # Name of the volume to mount
    - name: web-data
      # Mount path inside the container
      mountPath: /usr/share/nginx/html
---
apiVersion: v1
kind: Pod
metadata:
  name: allen-volume-hostpath02
spec:
  # Both Pods must be scheduled to the same node to share a hostPath volume
  nodeName: k8s-node01
  volumes:
  - name: linux-data
    hostPath:
      path: /hostpath-data
  containers:
  - name: linux
    image: harbor.allen.com/k8s/alpine:v1
    stdin: true
    volumeMounts:
    - name: linux-data
      mountPath: /allen
#master节点 kubectl apply -f 07-volume-hostpath.yaml kubectl exec allen-volume-hostpath01 -it -c web -- sh kubectl get pods -o wide curl 10.100.2.12 <h1>我是hostPath v1.0</h1> kubectl exec allen-volume-hostpath02 -c linux -- ls -l /allen total 4 -rw-r--r-- 1 root root 29 Jul 26 19:30 index.html #node节点 [root@k8s-node01 ~]# cd /hostpath-data/ [root@k8s-node01 /hostpath-data]# ls index.html [root@k8s-node01 /hostpath-data]# cat "<h1>我是通过外部存储卷修改的 v2.0</h1>" > index.html cat: <h1>我是通过外部存储卷修改的 v2.0</h1>: No such file or directory [root@k8s-node01 /hostpath-data]# echo "<h1>我是通过外部存储卷修改的 v2.0</h1>" > index.html #master节点 [root@k8s-master01 /server/yaml]# curl 10.100.2.12 <h1>我是通过外部存储卷修改的 v2.0</h1>
删除Pod时,不会删除存储卷,注意:共享hostPath存储卷的Pod必须存在同一个节点
apiVersion: v1
kind: Pod
metadata:
  name: allen-volume-nfs01
spec:
  nodeName: k8s-node01
  # Volume definitions
  volumes:
    # Volume name
  - name: web-data
    # NFS volume: data is shared across nodes through the NFS server
    nfs:
      # NFS server address
      server: harbor.allen.com
      # Exported path on the NFS server
      path: /allen/data/k8s
  containers:
  - name: web
    image: harbor.allen.com/k8s/nginx:1.23.4-alpine
    # Mount points
    volumeMounts:
      # Name of the volume to mount
    - name: web-data
      # Mount path inside the container
      mountPath: /usr/share/nginx/html
---
apiVersion: v1
kind: Pod
metadata:
  name: allen-volume-nfs02
spec:
  # Scheduled to a different node — NFS still serves the same shared data
  nodeName: k8s-node02
  volumes:
  - name: linux-data
    nfs:
      server: harbor.allen.com
      # Exported path on the NFS server
      path: /allen/data/k8s
  containers:
  - name: linux
    image: harbor.allen.com/k8s/alpine:v1
    stdin: true
    volumeMounts:
    - name: linux-data
      mountPath: /allen
#server [root@k8s-master01 /server/yaml]# kubectl apply -f 08-volume-nfs.yaml pod/allen-volume-nfs01 created pod/allen-volume-nfs02 created [root@k8s-master01 /server/yaml]# kubectl get pods NAME READY STATUS RESTARTS AGE allen-volume-nfs01 1/1 Running 0 7s allen-volume-nfs02 1/1 Running 0 7s [root@k8s-master01 /server/yaml]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES allen-volume-nfs01 1/1 Running 0 11s 10.100.2.16 k8s-node01 <none> <none> allen-volume-nfs02 1/1 Running 0 11s 10.100.1.10 k8s-node02 <none> <none> #nfs [root@harbor01 /allen/data/k8s]# echo "<h1>我是nfs添加的内容</h1>" > index.html [root@harbor01 /allen/data/k8s]# ls index.html #server [root@k8s-master01 /server/yaml]# curl 10.100.2.16 <h1>我是nfs添加的内容</h1> [root@k8s-master01 /server/yaml]# kubectl exec allen-volume-nfs02 -- ls -l /allen total 4 -rw-r--r-- 1 root root 34 Jul 26 20:00 index.html
Nfs存储卷解决了两个node的Pod之间数据共享问题
apiVersion: v1
kind: Pod
metadata:
  name: allen-stress-01
spec:
  containers:
  - name: stress
    image: harbor.allen.com/tools/linux-tools:v0.1
    args:
    - "tail"
    - "-f"
    - "/etc/hosts"
    # Resource constraints for the container
    resources:
      # Scheduling requirement: if no node satisfies it, the Pod stays Pending
      requests:
        # Requested memory
        memory: 256M
        # Requested CPU; fixed unit: 1 core = 1000m
        cpu: 500m
      # Hard upper limit on the resources the container may use
      limits:
        memory: 256M
        # A bare number means whole cores (1 = 1 core)
        cpu: 1
知识点:
resources: 资源关键字
requests: 期望目标资源大小 不满足条件不会进行调度,Pod处于pending状态
limits: 配置容器使用资源的上限
#master [root@k8s-master01 /server/yaml]# kubectl apply -f 09-stress.yaml [root@k8s-master01 /server/yaml]# kubectl exec allen-stress-01 -it -- sh /usr/local/stress # stress -c 4 --verbose --timeout 1m /usr/local/stress # stress -m 5 --vm-bytes 200000000 --vm-keep --verbose #node [root@k8s-node02 ~]# docker stats 2e9d43e29947
kubectl exec -it allen-nginx-tomcat -- sh # 默认连接到第一个容器
kubectl exec -it allen-nginx-tomcat -c nginx -- sh # 连接nginx容器
kubectl exec -it allen-nginx-tomcat -c tomcat -- sh # 连接tomcat容器
查看容器名称
cat 02-nginx-tomcat.yaml
kubectl describe pod allen-nginx-tomcat
kubectl get pods allen-nginx-tomcat -o yaml
kubectl logs -c nginx -f --timestamps --since=20m allen-nginx-tomcat
-c:
    指定要查看的容器名称。
-f:
    实时查看日志。
--timestamps:
    显示时间戳相关信息。
--since=20m:
    查看最近20分钟内的日志。
kubectl logs -c tomcat -f --timestamps -p allen-nginx-tomcat
-p:
查看上一个挂掉的容器
使用"kubectl logs"查看的是容器的标准输出或错误输出日志,如果想要使用该方式查看,需要将日志重定向到/dev/stdout或者/dev/stderr。
#将Pod的的文件拷贝到宿主机: kubectl cp allen-game-014:/start.sh /tmp/1.sh # 拷贝文件 kubectl cp allen-game-014:/etc /tmp/2222 # 拷贝目录 ll /tmp/ total 16 -rw-r--r-- 1 root root 3369 Apr 13 17:01 1.sh drwxr-xr-x 20 root root 4096 Apr 13 17:02 2222 #将宿主机的文件拷贝到Pod的容器中: kubectl cp 01-nginx.yaml allen-game-014:/ kubectl cp /tmp/2222/ allen-game-014:/ kubectl exec allen-game-014 -- ls -l / total 24 -rw-r--r-- 1 root root 301 Apr 13 09:03 01-nginx.yaml drwxr-xr-x 20 root root 4096 Apr 13 09:04 2222
Always 默认值,表示始终拉取最新的镜像。
IfNotPresent 如果本地有镜像,则不去远程仓库拉取镜像,若本地没有,才会去远程仓库拉取镜像。
Never 如果本地有镜像则尝试启动,若本地没有镜像,也不会去远程仓库拉取镜像。
apiVersion: v1 kind: Pod metadata: name: allen-web-imagepullpolicy-001 spec: nodeName: k8s-node01 containers: - name: nginx image: harbor.allen.com/k8s/web:v0.1 # 指定镜像的下载策略,有效值为: Always, Never, IfNotPresent # Always: # 默认值,表示始终拉取最新的镜像。 # IfNotPresent: # 如果本地有镜像,则不去远程仓库拉取镜像,若本地没有,才会去远程仓库拉取镜像。 # Never: # 如果本地有镜像则尝试启动,若本地没有镜像,也不会去远程仓库拉取镜像。 #imagePullPolicy: Always # imagePullPolicy: IfNotPresent imagePullPolicy: Never
restartPolicy: Always # 当容器退出时,始终重启容器。
restartPolicy: OnFailure # 当容器正常退出时不会重启容器,异常退出时,会重启容器。
restartPolicy: Never # 当容器退出时,始终不重启。
apiVersion: v1 kind: Pod metadata: name: allen-web-restartpolicy-always spec: nodeName: k8s-node01 # 当容器退出时,始终重启容器。 restartPolicy: Always containers: - name: nginx image: harbor.allen.com/k8s/web:v0.1 imagePullPolicy: Always command: - "sleep" - "10" --- apiVersion: v1 kind: Pod metadata: name: allen-web-restartpolicy-onfailure spec: nodeName: k8s-node01 # 当容器正常退出时不会重启容器,异常退出时,会重启容器。 restartPolicy: OnFailure containers: - name: nginx image: harbor.allen.com/k8s/web:v0.1 imagePullPolicy: Always command: - "sleep" - "10" --- apiVersion: v1 kind: Pod metadata: name: allen-web-restartpolicy-never spec: nodeName: k8s-node01 # 当容器退出时,始终不重启。 restartPolicy: Never containers: - name: nginx image: harbor.allen.com/k8s/web:v0.1 imagePullPolicy: Always command: - "sleep" - "10"
知识点:
env: 后面跟键值对 - key
value
valueFrom: 引用已有的关键字变量
fieldRef: 引用某个字段
fieldPath: 指定字段的路径
metadata.name
spec.nodeName
status.hostIP
status.podIP
apiVersion: v1 kind: Pod metadata: name: allen-game-env spec: nodeName: k8s-node01 containers: - name: game image: harbor.allen.com/games/games:v0.1 # 想容器传递环境变量 env: # 指定的变量名称 - name: SCHOOL # 指定变量的值 value: HBNU - name: CLASS value: 2110 - name: ALLEN_POD_NAME # 不适用自定义的变量值,而是引用别处的值 valueFrom: # 值引用自某个字段 fieldRef: # 指定字段的路径 fieldPath: "metadata.name" - name: ALLEN_NODENAME valueFrom: fieldRef: fieldPath: "spec.nodeName" - name: ALLEN_HOSTIP valueFrom: fieldRef: fieldPath: "status.hostIP" - name: ALLEN_PODIP valueFrom: fieldRef: fieldPath: "status.podIP"
kubectl exec allen-game-env -- env
利用SFTPGo 实现,跨服务端的数据同步功能
https://github.com/sftpgo/helm-chart.git
helm install --generate-name sftpgo -f sftpgo/values.yaml
输入域名,第一次登录要求创建admin用户
进去之后:
选择硅基流动API(避免deepseek的API服务器性能瓶颈,还会额外赠送14元的体验金):
https://cloud.siliconflow.cn/i/frVugSQo
硅基流动API接入文档:
基于K8S以及NEXTCHAT实现个人AI对话平台
主机名 | 规格 | PrivateIP | PublicIP |
---|---|---|---|
master | 2C4G | xx | xx |
node01 | 2C4G | xx | xx |
node02 | 2C4G | xx | xx |
node03 | 2C4G | xx | xx |
角色 | 应用 | 版本 |
---|---|---|
系统 | Centos | 7.9 |
容器编排 | K8S | 1.28 |
运行时 | containerd | 1.7.16 |
自行百度,参考其他文章:
需要服务器集群科学上网解决方案的同学联系作者
服务器科学上网解决的目的:
不用在意DockerHUB的局限性
kubectl create ns ingress-nginx
kubectl apply -f deploy.yaml
kubectl get all -n ingress-nginx
注意:没有代理没有加速地址的同学请:修改Yaml中的镜像地址
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-nginx-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
spec:
containers:
- args:
- /nginx-ingress-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.12.0@sha256:e6b8de175acda6ca913891f0f727bca4527e797d52688cbe9fec9040d6f6b6fa
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: false
runAsGroup: 82
runAsNonRoot: true
runAsUser: 101
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-create
spec:
containers:
- args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission-patch
spec:
containers:
- args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.0
name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-nginx-controller-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
port: 443
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None
优势: 丰富的面具(提示词),集成十几个AI厂商
使用docker作为运行时同学,请自行寻找加速地址。
#$cat ai-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ai-chat
  namespace: dev
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ai-chat
  template:
    metadata:
      labels:
        app: ai-chat
    spec:
      containers:
      - name: ai-chat-container
        image: rocky9/nextchat-nightly:latest # latest nightly image
        ports:
        - containerPort: 3000
        env:
        - name: WHITE_WEBDAV_ENDPOINTS # WebDAV endpoint whitelist used for chat-history sync
          value: http://<用户>:<密码>@<sftpgo地址>/ai-chat
        - name: DEEPSEEK_API_KEY
          value: # replace with your actual API key
        - name: SILICONFLOW_API_KEY
          value: # SiliconFlow API key
        - name: CODE
          value: "" # replace with your access password
        - name: DEFAULT_MODULE
          value: deepseek-chat
      nodeSelector:
        disktype: ai-chart
      dnsConfig:
        nameservers:
        - 114.114.114.114 # public DNS resolver
apiVersion: v1
kind: Service
metadata:
  name: ai-chat
  namespace: dev
spec:
  selector:
    app: ai-chat
  ports:
    # Expose port 80 in-cluster, forwarding to the container's port 3000
  - protocol: TCP
    port: 80
    targetPort: 3000
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ai-ingress
  namespace: dev
spec:
  ingressClassName: nginx
  rules:
  - host: # your domain (xx.xx.xx); may be omitted to access by IP instead
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: ai-chat
            port:
              number: 80
kubectl label nodes node02 disktype=ai-chart #给node打标签,让Pod调度到指定node
kubectl create ns dev
kubectl apply -f 上面的三个yaml文件
自己的公有云平台,将域名解析到指定的node节点的公网IP上
选择硅基流动API(避免deepseek的API服务器性能瓶颈,还会额外赠送14元的体验金):
https://cloud.siliconflow.cn/i/frVugSQo
硅基流动API接入文档:
回复速率取决于deepseek API服务器的性能瓶颈,因此建议:
选择硅基流动API(避免deepseek的API服务器性能瓶颈,还会额外赠送14元的体验金):
诚挚地向您推荐硅基流动,作为个人AI对话平台的卓越API选择。其性能瓶颈远超同类产品,为您提供无与伦比的流畅体验。更令人欣喜的是,以官网同等价格为您呈现,并额外奉上14元体验金,让您尽情探索智能对话的无限可能。
https://cloud.siliconflow.cn/i/frVugSQo
https://github.com/ChatGPTNextWeb/NextChat?tab=readme-ov-file
本文档描述了如何在 Kubernetes 集群中部署 Solo 博客系统。Solo 是一个基于 Java 的博客系统,使用 MySQL 作为数据库。部署包括以下组件:
kubectl
命令行工具。nginx-ingress-controller
。solo.jiufog.space
),并配置好 DNS 解析。确保 MySQL 数据库已创建,并准备好以下信息:
solo
root
<your_password>
(替换为实际密码)<your_db_endpoints>
如果 dev
命名空间不存在,请先创建:
kubectl create namespace dev
将以下内容保存为 solo-deployment.yaml
文件:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: solo
  namespace: dev
spec:
  replicas: 1
  selector:
    matchLabels:
      app: solo
  template:
    metadata:
      labels:
        app: solo
    spec:
      containers:
      - name: solo
        image: b3log/solo
        env:
        - name: RUNTIME_DB
          value: "MYSQL"
        - name: JDBC_USERNAME
          value: "root"
        - name: JDBC_PASSWORD
          value: "" # replace with the root password
        - name: JDBC_DRIVER
          value: "com.mysql.cj.jdbc.Driver"
        # NOTE(review): 10.244.3.48 looks like a Pod IP, which is not stable
        # across restarts — prefer a Service DNS name for the MySQL endpoint.
        - name: JDBC_URL
          value: "jdbc:mysql://10.244.3.48:3306/solo?useUnicode=yes&characterEncoding=UTF-8&useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true"
        args: [
          "--listen_port=8080",
          "--server_scheme=http",
          "--server_host=solo.jiufog.space", # replace with your own domain
          "--server_port="
        ]
        ports:
        - containerPort: 8080
使用 kubectl
部署:
kubectl apply -f solo-deployment.yaml
将以下内容保存为 solo-service.yaml
文件:
apiVersion: v1
kind: Service
metadata:
  name: solo-service
  namespace: dev
spec:
  selector:
    app: solo
  ports:
    # Expose port 80 in-cluster, forwarding to the Solo container's 8080
  - protocol: TCP
    port: 80
    targetPort: 8080
使用 kubectl
部署:
kubectl apply -f solo-service.yaml
将以下内容保存为 solo-ingress.yaml
文件:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: solo-ingress
  namespace: dev
spec:
  ingressClassName: nginx
  rules:
  - host: solo.jiufog.space # replace with your own domain
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: solo-service
            port:
              number: 80
使用 kubectl
部署:
kubectl apply -f solo-ingress.yaml
kubectl get pods -n dev
确保 solo
Pod 状态为 Running
。
kubectl get svc -n dev
确保 solo-service
已创建。
kubectl get ingress -n dev
确保 solo-ingress
已创建,并检查其 ADDRESS
字段是否已分配。
http://solo.jiufog.space
,确保页面正常加载。solo-deployment.yaml
中正确填写 JDBC_PASSWORD
。solo.jiufog.space
已正确解析到 Kubernetes 集群的 Ingress 控制器 IP。kubectl logs <pod-name> -n dev
kubectl logs -n ingress-nginx <ingress-nginx-pod-name>
如果需要删除部署,可以运行以下命令:
kubectl delete -f solo-deployment.yaml
kubectl delete -f solo-service.yaml
kubectl delete -f solo-ingress.yaml