
Run the following commands on every master node.

Allow users to log in remotely over SSH

Note: edit the sshd_config file as the root user

vi /etc/ssh/sshd_config

Set PasswordAuthentication yes
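For reference, the same change can be made non-interactively; a minimal sketch, assuming the stock CentOS 7 sshd_config layout:

# Sketch: enable password login and restart sshd (assumes the default commented-out directive)
sed -i 's/^#\?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
systemctl restart sshd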

Set the system hostname and mutual resolution in the hosts file

# The hostname differs per machine (rt-master11 here)
hostnamectl set-hostname rt-master11
# Copy the hosts file to every node
scp /etc/hosts root@192.168.180.31:/etc/hosts
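The contents of /etc/hosts are not shown above. A minimal sketch, assuming the three masters use the addresses that appear elsewhere in these notes; rt-master31 is an assumed name following the rt-master11/rt-master21 pattern:

# Sketch: hosts entries for mutual name resolution (hostname-to-IP mapping assumed)
cat >> /etc/hosts <<EOF
192.168.180.11 rt-master11
192.168.180.21 rt-master21
192.168.180.31 rt-master31
EOF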

Install dependency packages

yum install -y iptables ipset ipvsadm curl wget net-tools

Switch the firewall to iptables and flush the rules

systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

Disable swap and SELinux

# Disable swap
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Disable SELinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
reboot
# Confirm SELinux is disabled
getenforce

Create the installation directories

mkdir -p ~/install-k8s/core ~/install-k8s/plugin/flannel

Adjust kernel parameters for Kubernetes

Create the file in ~/install-k8s/core

cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Do not use swap unless the system hits OOM
vm.swappiness=0
# Do not check whether enough physical memory is available
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

Load the br_netfilter module at boot

vi /etc/rc.sysinit
# Add the following text
#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
[ -x $file ] && $file
done
# Add the modules to load at boot
cat > /etc/sysconfig/modules/br_netfilter.modules <<EOF
modprobe br_netfilter
modprobe ip_conntrack
EOF
# Check the module file
vi /etc/sysconfig/modules/br_netfilter.modules
# Make the module file executable
chmod 755 /etc/sysconfig/modules/br_netfilter.modules
reboot
# After the reboot the modules are loaded automatically
lsmod |grep br_netfilter

Copy the sysctl configuration so it is applied at boot

modprobe br_netfilter
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
# Apply the configuration
sysctl -p /etc/sysctl.d/kubernetes.conf

Adjust the system time zone

# Set the system time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Write the current UTC time to the hardware clock
timedatectl set-local-rtc 0
# Restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond

Stop services the system does not need

systemctl stop postfix && systemctl disable postfix

Set the default gateway on the bridged NIC (for VMs created with Vagrant)

route add default gw 192.168.180.1
[root@rt-master21 hightUse]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.180.1 0.0.0.0 UG 0 0 0 eth1
0.0.0.0 10.0.2.2 0.0.0.0 UG 100 0 0 eth0
10.0.2.0 0.0.0.0 255.255.255.0 U 100 0 0 eth0
172.17.0.0 0.0.0.0 255.255.0.0 U 0 0 0 docker0
192.168.180.0 0.0.0.0 255.255.255.0 U 101 0 0 eth1
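Note that route add only changes the running routing table and does not survive a reboot. A sketch of one way to persist it on CentOS 7, assuming eth1 is the bridged interface as in the output above:

# Sketch: persist the default route across reboots (assumes eth1 is the bridged NIC)
cat > /etc/sysconfig/network-scripts/route-eth1 <<EOF
default via 192.168.180.1 dev eth1
EOF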

Configure rsyslogd and systemd-journald

# Directory for persistent log storage
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d

cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Maximum disk usage: 10G
SystemMaxUse=10G
# Maximum size of a single log file: 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
EOF

# Restart the journald service
systemctl restart systemd-journald
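To confirm journald is now writing persistent logs under /var/log/journal, the usage can be checked with, for example:

journalctl --disk-usage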

Disable NUMA

# Back up the default file
cp /etc/default/grub{,.bak}
# Add the `numa=off` parameter to the GRUB_CMDLINE_LINUX line
vi /etc/default/grub
# Check
# diff /etc/default/grub.bak /etc/default/grub
# 6c6
# < GRUB_CMDLINE_LINUX="no_timer_check console=tty0 console=ttyS0,115200n8 net.ifnames=0 biosdevname=0 elevator=noop crashkernel=auto"
# ---
# > GRUB_CMDLINE_LINUX="no_timer_check console=tty0 console=ttyS0,115200n8 net.ifnames=0 biosdevname=0 elevator=noop crashkernel=auto numa=off"
cp /boot/grub2/grub.cfg{,.bak}
grub2-mkconfig -o /boot/grub2/grub.cfg
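After the next reboot, whether the parameter took effect can be verified against the running kernel command line:

# After rebooting, numa=off should appear on the kernel command line
grep -o 'numa=off' /proc/cmdline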

Prerequisites for enabling IPVS in kube-proxy

modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

Install Docker

# Install the yum utilities
yum install -y yum-utils
# Add the stable repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker
yum install -y docker-ce
# Configure the daemon
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://77xsmdni.mirror.aliyuncs.com/"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF

systemctl daemon-reload
# Start the Docker service
systemctl start docker && systemctl enable docker
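Because daemon.json switches the cgroup driver to systemd, it is worth confirming that Docker picked it up; the exact output wording can vary between Docker versions:

docker info 2>/dev/null | grep -i 'cgroup driver'
# expected: Cgroup Driver: systemd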

Start the HAProxy and Keepalived containers on the master nodes

Create the directory ~/install-k8s/hightUse

Create the haproxy.cfg configuration file

mkdir -p data/lb/etc
vi data/lb/etc/haproxy.cfg

Add the backends one at a time so that nodes that are not up yet do not cause errors; start with the 192.168.180.21 node

global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon

defaults
log global
mode http
option httplog
option dontlognull
retries 3
option redispatch
timeout connect 5000
timeout client 50000
timeout server 50000

frontend stats-front
bind *:8081
mode http
default_backend stats-back

frontend fe_k8s_6444
bind *:6444
mode tcp
timeout client 1h
log global
option tcplog
default_backend be_k8s_6443
acl is_websocket hdr(Upgrade) -i WebSocket
acl is_websocket hdr_beg(Host) -i ws

backend stats-back
mode http
balance roundrobin
stats uri /haproxy/stats
stats auth pxcstats:secret

backend be_k8s_6443
mode tcp
timeout queue 1h
timeout server 1h
timeout connect 1h
log global
balance roundrobin
server rancher01 192.168.180.21:6443

Create the script that starts HAProxy (data/lb/start-haproxy.sh)

#!/bin/bash
MasterIP1=192.168.180.11
MasterIP2=192.168.180.21
MasterIP3=192.168.180.31
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
-e MasterIP1=$MasterIP1 \
-e MasterIP2=$MasterIP2 \
-e MasterIP3=$MasterIP3 \
-e MasterPort=$MasterPort \
-v /root/install-k8s/hightUse/data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
wise2c/haproxy-k8s

Pull the HAProxy image

# Required on every master node
docker pull wise2c/haproxy-k8s
# Run the HAProxy start script
chmod a+x data/lb/start-haproxy.sh
./data/lb/start-haproxy.sh

Start Keepalived

vi /root/install-k8s/hightUse/data/lb/start-keepalived.sh

#!/bin/bash
VIRTUAL_IP=192.168.180.100
INTERFACE=eth1
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
--net=host --cap-add=NET_ADMIN \
-e VIRTUAL_IP=$VIRTUAL_IP \
-e INTERFACE=$INTERFACE \
-e CHECK_PORT=$CHECK_PORT \
-e RID=$RID \
-e VRID=$VRID \
-e NETMASK_BIT=$NETMASK_BIT \
-e MCAST_GROUP=$MCAST_GROUP \
wise2c/keepalived-k8s

Run the Keepalived container

chmod a+x data/lb/start-keepalived.sh
./data/lb/start-keepalived.sh
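Only the master that currently holds the VIP will show it; a quick check, assuming the eth1 interface and the 192.168.180.100 VIP configured above:

# The node that owns the VIP lists it as an additional address on eth1
ip addr show eth1 | grep 192.168.180.100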

Install kubeadm (master and worker nodes)

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install the Kubernetes components

yum -y install kubeadm kubectl kubelet
systemctl enable kubelet.service
# Set the node IP; otherwise the node's INTERNAL-IP shows up as 10.0.2.15 (the NAT interface)
vi /etc/sysconfig/kubelet
# Change the content to the following
KUBELET_EXTRA_ARGS="--node-ip=192.168.180.21"
# Restart the service
systemctl restart kubelet.service
# Enable kubectl auto-completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

Create the Kubernetes initialization file

kubeadm config print init-defaults > kubeadm-config.yaml

Modify the following

# Node IP address
advertiseAddress: 192.168.180.21
# Image repository
imageRepository: registry.aliyuncs.com/google_containers
# For a highly available multi-master setup, configure this VIP address; add it below clusterName
controlPlaneEndpoint: "192.168.180.100:6444"
# Kubernetes version
kubernetesVersion: v1.18.3
# Under networking:, add podSubnet: "10.244.0.0/16" (the network plugin's default subnet)
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12

# Switch the kube-proxy mode to IPVS; append this at the end of the file
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

Initialize the cluster (note: the whole cluster deployment must be completed within 12 hours, otherwise the certificates expire)

kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Join a master node
kubeadm join 192.168.180.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:04d5aa7619d1ebe925a9ac5f7ad8d69dc511d18ee158c40f737837692b3bb41a \
--control-plane --certificate-key fcc0f31d68fff8886666649f3363ae888e02915bdaea213a8d74d9add061893a

# Join a worker node
kubeadm join 192.168.180.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:440a02125afb176cb517893deda010b2252c041973cdd4ea01bc6ba712d7b280
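After the init finishes (and later, once the other nodes have joined), the cluster state can be sanity-checked from the first master:

kubectl get nodes -o wide
kubectl get pods -n kube-system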

Copy the configuration files to the other master nodes

scp -r  ~/install-k8s/hightUse/* root@192.168.180.11:/root/install-k8s/hightUse
scp -r ~/install-k8s/hightUse/* root@192.168.180.31:/root/install-k8s/hightUse

Start the other master nodes

Run the following in the ~/install-k8s/hightUse directory

chmod a+x data/lb/start-haproxy.sh
./data/lb/start-haproxy.sh
chmod a+x data/lb/start-keepalived.sh
./data/lb/start-keepalived.sh

Install the Kubernetes components (same as above)

Join these master nodes to the primary master

kubeadm join 192.168.180.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:440a02125afb176cb517893deda010b2252c041973cdd4ea01bc6ba712d7b280 \
--control-plane --certificate-key e6fe032d63e01c1c1a61824b46b4a6e28535fd34ea527656a409ec65433f126a
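On each newly joined master, kubectl needs the admin kubeconfig just as on the first master; kubeadm prints the same instructions at the end of the control-plane join:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config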

After every node is up, restart HAProxy-K8S

Edit ~/install-k8s/hightUse/data/lb/etc/haproxy.cfg

global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon

defaults
log global
mode http
option httplog
option dontlognull
retries 3
option redispatch
timeout connect 5000
timeout client 50000
timeout server 50000

frontend stats-front
bind *:8081
mode http
default_backend stats-back

frontend fe_k8s_6444
bind *:6444
mode tcp
timeout client 1h
log global
option tcplog
default_backend be_k8s_6443
acl is_websocket hdr(Upgrade) -i WebSocket
acl is_websocket hdr_beg(Host) -i ws

backend stats-back
mode http
balance roundrobin
stats uri /haproxy/stats
stats auth pxcstats:secret

backend be_k8s_6443
mode tcp
timeout queue 1h
timeout server 1h
timeout connect 1h
log global
balance roundrobin
server rancher01 192.168.180.21:6443
server rancher02 192.168.180.11:6443
server rancher03 192.168.180.31:6443

Restart the HAProxy-K8S container

docker rm -f HAProxy-K8S && bash /root/install-k8s/hightUse/data/lb/start-haproxy.sh

# If 192.168.180.100 cannot be reached, restart Keepalived
# docker rm -f Keepalived-K8S && bash /root/install-k8s/hightUse/data/lb/start-keepalived.sh

Copy this configuration over the other master nodes' configuration, then restart their HAProxy-K8S containers

scp ~/install-k8s/hightUse/data/lb/etc/haproxy.cfg root@192.168.180.31:/root/install-k8s/hightUse/data/lb/etc/haproxy.cfg
scp ~/install-k8s/hightUse/data/lb/etc/haproxy.cfg root@192.168.180.11:/root/install-k8s/hightUse/data/lb/etc/haproxy.cfg

Check the etcd cluster status

[root@rt-master21 hightUse]$ kubectl -n kube-system exec etcd-rt-master21 -- etcdctl --endpoints=https://192.168.180.21:2379,https://192.168.180.11:2379,https://192.168.180.31:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key endpoint health
kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml

Handling kubectl failing after one master node is shut down

vi ~/.kube/config

# Change server: https://192.168.180.100:6444 to the local node's IP
server: https://192.168.180.11:6443
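The same edit can be scripted; a sketch, where 192.168.180.11 stands for whichever master you are on:

sed -i 's#https://192.168.180.100:6444#https://192.168.180.11:6443#' ~/.kube/config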

Rejoining a master node and avoiding etcd errors

Sometimes a master node needs to be deleted and rejoined, for example after renaming the machine. When rejoining, an etcd error often appears at the following step:

[check-etcd] Checking that the etcd cluster is healthy

Delete the stale etcd member

Run this on the first master node to exec into the etcd container

[root@rt-master21 vagrant]# kubectl exec -it etcd-rt-master21 sh -n kube-system
# Run this command inside the container
etcdctl --endpoints 127.0.0.1:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key member list
# bf50d6c7bfb98b43, started, rt-master21, https://192.168.180.21:2380, https://192.168.180.21:2379, false
# da4f3d6e4d869225, started, rt-master11, https://10.0.2.15:2380, https://10.0.2.15:2379, false
# The rt-master11 machine corresponds to the member ID da4f3d6e4d869225; remove that etcd member by ID with the following command
etcdctl --endpoints 127.0.0.1:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key member remove da4f3d6e4d869225

Get the token for joining a master

kubeadm init phase upload-certs --upload-certs
# b = 2f530afd934070166d4520badf1c86c56d018358e018d6e7d97ef5f08fef9d4f
kubeadm token create --print-join-command
# a = kubeadm join 192.168.180.100:6444 --token 2rmyg7.qorrwlcux6pz3r0s --discovery-token-ca-cert-hash sha256:418f932dd8fe8d516f980980285688e5bc299e746f9697c94638aafd23814eca
# The command to join as a master is a + --control-plane --certificate-key + b
kubeadm join 192.168.180.100:6444 --token 2rmyg7.qorrwlcux6pz3r0s --discovery-token-ca-cert-hash sha256:418f932dd8fe8d516f980980285688e5bc299e746f9697c94638aafd23814eca --control-plane --certificate-key 2f530afd934070166d4520badf1c86c56d018358e018d6e7d97ef5f08fef9d4f

Environment preparation

  1. Tools involved
    Operating system: CentOS 7
    Virtualization tool: VirtualBox
    Provisioning tool: Vagrant
  2. Using the tools
    With Vagrant it is easy to create a CentOS 7 virtual machine in VirtualBox, and Vagrant can also configure the networking between the VM and the host.
    Vagrantfile configuration
    # -*- mode: ruby -*-
    # vi: set ft=ruby :
    Vagrant.configure("2") do |config|
    config.vm.box = "centos/7"

    config.vm.provider "vmware_fusion" do |v|
    v.vmx["memsize"] = "254"
    v.vmx["numvcpus"] = "1"
    end

    config.vm.provider "virtualbox" do |v|
    v.customize ["modifyvm", :id, "--memory", "254"]
    v.customize ["modifyvm", :id, "--cpus", "1"]
    end

    # The IP must be in the same range as the VM's default-assigned IP; check inside the VM
    #config.vm.network :private_network, ip: "10.0.2.18"
    # Bridged network, on the same subnet as the local Wi-Fi
    #config.vm.network "public_network", :bridge => "en0: Wi-Fi (Airport)", :ip => "192.168.31.21"
    config.vm.network "public_network", :bridge => "en0: Wi-Fi (Airport)", :ip => "192.168.180.21"

    # Provisioning script that installs Docker
    config.vm.provision "shell", privileged: true, path: "./setup.sh"
    end
  3. Install Docker on CentOS
    To avoid errors during installation, install the build tools first
    yum install -y gcc make gcc-c++ kernel-devel
    3.1 As root, install the yum utilities
    yum install -y yum-utils
    3.2 Configure the latest Docker repository
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    3.3 Install Docker
    yum install docker-ce docker-ce-cli containerd.io
    3.4 Start Docker
    systemctl start docker
    3.5 Enable Docker at boot
    systemctl enable docker
  4. Set up VM shared folders
    Select the VM -> right-click Settings -> Shared Folders -> add a shared folder (enable auto-mount and make permanent)

Automated deployment of IIS sites with Web Deploy

  1. Install Web Deploy on the web server
    See the detailed installation reference
    Installing all Web Deploy components is the safer choice; otherwise publishing may fail with a 404 error
  2. Configure Web Deploy on the web server
    1. Start the server's WMSVC (Web Management Service)
    2. Confirm the site and user data
    3. Enable remote updates for the specific site and obtain the publish-settings file for the client
    4. Enable backups: open PowerShell ISE as administrator and enable the backup/restore feature (default backup directory: <site directory>/<site name>_snapshots)
  3. Install Web Deploy on the client
    The Web Deploy component bundled with VS2017 is Web Deploy 4.0; the version downloadable from the official site is 3.6
  4. Site backup, publish, and rollback
    1. PowerShell publish command
      & "C:\Program Files\IIS\Microsoft Web Deploy V3"\msdeploy.exe -allowuntrusted -verb:sync -source:contentPath="absolute path of the folder to publish" -dest:contentPath="site name",publishSettings="absolute path of the publish-settings file generated by the server",password="password of the corresponding user"
    2. PowerShell backup command
      & "C:\Program Files\IIS\Microsoft Web Deploy V3"\msdeploy.exe -allowuntrusted -verb:sync -source:backupManager -dest:backupManager="site name",publishSettings="absolute path of the publish-settings file generated by the server",password="password of the corresponding user"
    3. PowerShell rollback command
      & "C:\Program Files\IIS\Microsoft Web Deploy V3"\msdeploy.exe -verb:sync -source:backupManager -dest:backupManager="site name",useLatest=true,publishSettings="absolute path of the publish-settings file generated by the server",password="password of the corresponding user"

      Tips

      The -enableRule:DoNotDeleteRule parameter specifies that no files should be deleted
      The -whatif parameter does not actually execute; it only shows a summary of the operations that would be performed

Abp file export can be done in the distributed service layer (the Web.Core layer), but for list-data export, writing it in Web.Core separates the export feature from the application layer, which becomes a maintenance headache later on; so here the export feature is implemented in the application layer instead.

Encapsulating the export method

  1. Return the file with FileContentResult
    Define a static GetFileResponse method in ExcelManager

    public static FileContentResult GetFileResponse<T>(List<T> data, string strFileName, string sheetName)
    {
    try
    {
    FileContentResult fileContentResult = new FileContentResult(ListToDownExcel(data, sheetName).ToArray(), "application/vnd.ms-excel")
    {
    FileDownloadName = strFileName
    };
    return fileContentResult;

    }
    catch (Exception)
    {
    throw new UserFriendlyException("The server is busy, please try again!");
    }

    }

    The ListToDownExcel method mainly builds the Excel file stream; its contents depend on the specific business logic

  2. Export endpoint in the Application layer

    public async Task<FileContentResult> ExportAsset(AssetSearchPageInput input)
    {
    //Fetch the data to export
    var dataList = await _assetManager.GetLoadFilterData(input.Asset).ToListAsync();
    //Map to the export model
    var datas = ObjectMapper.Map<List<AssetExportDto>>(dataList);
    //Export as an Excel file
    return ExcelManager.GetFileResponse(datas, "xxx.xls", "xxx");
    }

1. Reading the uploaded Excel data with DotNetCore.NPOI

To build a generic upload endpoint, the return type is List<object>

/// <summary>
/// Convert the uploaded Excel file into a list of models
/// </summary>
/// <param name="httpfile"></param>
/// <param name="isHasHead"></param>
/// <returns></returns>
public static List<object> ConvertExeclToListByHttpFile(HttpRequest httpfile, bool isHasHead = true)
{
var fileStream = httpfile.Form.Files.First();
var exName = Path.GetExtension(fileStream?.FileName);
var modelList = new List<object>();
IWorkbook workbook;
using (var fs = fileStream?.OpenReadStream())
{
workbook = CreateWorkbook(exName, fs);
if (workbook == null) return modelList;
}

var sheetConfig = workbook.GetSheet(ExcelModelConfig);
if (sheetConfig == null) return modelList;
var firstSheet = workbook.GetSheetAt(0);
if (firstSheet == null) return modelList;
var configRow = sheetConfig.GetRow(0);
var columnDictionary = GetColumnDictionary(configRow);
System.Collections.IEnumerator rows = firstSheet.GetRowEnumerator();
if (isHasHead)
{
rows.MoveNext();
}

while (rows.MoveNext())
{
var row = (IRow)rows.Current;
if (row == null) continue;
var model = SetValueToModelHasMsg(row, columnDictionary);

modelList.Add(model);
}

return modelList;
}

The uploaded Excel is read against a model-configuration sheet that must exist in the Excel template; it defines the fields to read, which are then returned to the front end.

Determine the Excel version from the uploaded file's extension

private static IWorkbook CreateWorkbook(string exName, Stream fs)
{
try
{
// .xlsx is the Excel 2007+ format
if (exName == ".xlsx")
{
return new XSSFWorkbook(fs);
}
// otherwise assume the Excel 2003 (.xls) format
return new HSSFWorkbook(fs);
}
catch (Exception)
{
return null;
}
}

Get the field names from the model configuration

private static Dictionary<int, string> GetColumnDictionary(IRow row)
{
Dictionary<int, string> dic = new Dictionary<int, string>();
for (int i = 0; i < row.LastCellNum; i++)
{
ICell cell = row.GetCell(i);
if (cell == null)
{
continue;
}

dic.Add(i, cell.StringCellValue);
}

return dic;
}

Read one row of Excel data according to the model configuration

private static object SetValueToModelHasMsg(IRow row, Dictionary<int, string> columnDictionary)
{
dynamic d = new System.Dynamic.ExpandoObject();
var model = d as ICollection<KeyValuePair<string, object>>;
//Create the properties and assign their values.
foreach (var dicConfig in columnDictionary)
{
try
{
if (string.IsNullOrEmpty(dicConfig.Value))
{
continue;
}

var cell = row.Cells.FirstOrDefault(a => a.ColumnIndex == dicConfig.Key);
if (cell != null)
{
string value = GetValueByCell(cell);
if (string.IsNullOrEmpty(value)) value = string.Empty;
var item = model.FirstOrDefault(a => a.Key == dicConfig.Value);
if (default(KeyValuePair<string, object>).Equals(item) == false)
{
//The field already exists; overwrite it
model.Remove(item);
}
model.Add(new KeyValuePair<string, object>(dicConfig.Value, value));
}
}
catch (Exception ex)
{
var errItem = model.FirstOrDefault(a => a.Key == "Msg");
if (default(KeyValuePair<string, object>).Equals(errItem))
{
model.Add(new KeyValuePair<string, object>("Msg", ex.Message));
}
}

}
var msgItem = model.FirstOrDefault(a => a.Key == "Msg");
if (default(KeyValuePair<string, object>).Equals(msgItem))
{
model.Add(new KeyValuePair<string, object>("Msg", "Data not validated!"));
}

return model;
}

1.1 Add the import controller in the Web.Core distributed service layer

[AbpMvcAuthorize]
[Route("api/[controller]/[action]")]
public class ExeclFileController : AbpController
{
[HttpPost]
public ActionResult<List<object>> ImportExeclToGetModel(IFormFile file)
{
return ExcelManager.ConvertExeclToListByHttpFile(Request);
}

}

In .NET Core 3.1, using AbpMvcAuthorize requires registering the middleware in Startup.cs

//Enable authentication
app.UseAuthentication();
//Enable authorization so the [AbpMvcAuthorize] attribute on controllers works
app.UseAuthorization();

2. Add a file-upload button to Swagger

Note: the Swagger version used here is the 5.0 preview
In Swagger's AddSwaggerGen callback, add:

options.MapType(typeof(IFormFile), () => new OpenApiSchema() { Type = "file" });

USE [master]
RESTORE DATABASE [新库] FROM DISK = N'D:\DBback\旧库备份.bak' WITH FILE = 1,
MOVE N'旧库' TO N'D:\软件\程序\Microsoft SQL Server\实例\MSSQL12.MSSQLSERVER\MSSQL\DATA\新库.mdf',
MOVE N'旧库_log' TO N'D:\软件\程序\Microsoft SQL Server\实例\MSSQL12.MSSQLSERVER\MSSQL\DATA\新库_log.ldf',
NOUNLOAD, REPLACE, STATS = 5

GO

Note:
[新库] is the new database name (here SFM_CK(测试版))
旧库 is the logical name of the mdf file inside the backup (here SFM_ECDS)
旧库_log is the logical name of the ldf file inside the backup (here SFM_ECDS_log)

Microsoft changes the default cookie policy

Details

SameSite is an IETF draft standard intended to provide some protection against cross-site request forgery (CSRF) attacks. The draft was first written in 2016 and updated in 2019; the update is not backward compatible with the 2016 version.
The key change is that, when SameSite is not explicitly specified, the default behavior changes from None to Lax. Chrome 80 officially enables this behavior by default, and Windows servers and desktops may also pick it up through Windows Update.

Set-Cookie: ASP.NET_SessionId=hlftr5atg4lmsnzyx5disqms; path=/; HttpOnly;

changes to

Set-Cookie: ASP.NET_SessionId=hlftr5atg4lmsnzyx5disqms; path=/; HttpOnly; SameSite=Lax

With this change, if the server side relies on cookies for session handling, cross-domain requests lose the session.
The reason is that in Lax mode the cookie is not sent on cross-site POST requests.

Solutions

Since the lost session is caused by the lost cookie, there are essentially three ways to handle it.

  1. Replace the session medium
    Use the Authorization header instead of a cookie; however, moving the session from cookies to headers requires back-end code changes.
  2. Set the cookie's SameSite attribute
    <system.web>
    <sessionState mode="InProc" timeout="60"/>
    </system.web>
    Add the cookieSameSite setting
    <system.web>
    <sessionState mode="InProc" timeout="60" cookieSameSite="None"/>
    </system.web>
    However, depending on the state of Windows Update, some servers do not support this setting and need a system update first; see the official documentation for the exact configuration.
  3. Disable the browser's SameSite policy
    Chrome 80 enables this change by default; turning it off is enough
    Configuration to disable it

    References

    Microsoft's change notes
    The Windows update that causes this behavior
    Chrome's SameSite behavior

Vagrant currently supports only VirtualBox versions 4.0.x, 4.1.x, 4.2.x, 4.3.x, 5.0.x, 5.1.x, and 5.2.x. Other versions are not supported; if your VirtualBox is not one of these versions, uninstall it and reinstall a supported one.
Vagrant download page

Create a centos/7 virtual machine

vagrant init centos/7
  1. The following script can be added to install Docker automatically

    # Provisioning commands that install Docker
    config.vm.provision "shell", inline: <<-SHELL
    sudo yum remove -y docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine
    sudo yum install -y yum-utils device-mapper-persistent-data lvm2
    sudo yum-config-manager -y --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    sudo yum install -y docker-ce docker-ce-cli containerd.io
    sudo systemctl start docker
    SHELL
  2. Create the centos/7 virtual machine
    vagrant up

  3. Connect to the virtual machine defined in the Vagrantfile
    vagrant ssh

  4. Check the virtual machine status
    vagrant status

  5. Stop the virtual machine
    vagrant halt

  6. Delete the virtual machine
    vagrant destroy

  7. Create the docker group, to avoid permission errors later
    sudo groupadd docker

  8. Add the vagrant user to the docker group
    sudo gpasswd -a vagrant docker

  9. Restart the Docker service
    sudo service docker restart

  10. Log out and reconnect

    exit
    vagrant ssh

Common problems and how to handle them

  1. Fixing this error reported by a Vagrant-created VM

    bash: warning: setlocale: LC_CTYPE: cannot change locale (UTF-8): No such file or directory

    Open the environment file inside the VM: vim /etc/environment
    and add

    LC_ALL=en_US.UTF-8
    LANG=en_US.UTF-8

    Or run this command

    sudo chmod 777 /etc/environment && sudo echo -e "LC_ALL=en_US.UTF-8 \nLANG=en_US.UTF-8" >> /etc/environment && sudo chmod 644 /etc/environment

    Then log out and log back in

  2. Logging in as root with Vagrant
    See the following blog post
    Logging in to a Vagrant VM as root
    Note:
    Edit the sshd_config file as root
    vim /etc/ssh/sshd_config
    Set PermitRootLogin to yes
    Set PasswordAuthentication yes
    Restart the sshd service
    systemctl restart sshd

  3. The yum process is locked
    The stuck yum process can be cleared by force-removing its pid file
    rm -f /var/run/yum.pid

1. Download the .NET Core installation packages

.NET Core 3.1 download page

  • The .NET Core SDK 3.1 is used for development
  • The Hosting Bundle is used for publishing on IIS

After installing the Hosting Bundle, the IIS services must be restarted

net stop was /y
net start w3svc

When publishing the site, set the application pool identity to LocalSystem

2. Publishing notes

When publishing, if Swagger returns a 500 error, it is usually because the XML documentation file was not generated. Add the following to the project's csproj file:

<PropertyGroup>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
</PropertyGroup>

2.1 Debugging on IIS

Change the aspNetCore element's arguments in web.config to the path of the debug build output

<aspNetCore processPath="dotnet" arguments="bin\Debug\netcoreapp2.2\MPACore.PhoneBook.Web.Mvc.dll" stdoutLogEnabled="true" stdoutLogFile=".\logs\stdout" forwardWindowsAuthToken="false">

2.2 Fixing the problem of RESTful verbs (PUT/DELETE) not being supported

<!-- Support RESTful PUT, DELETE, POST, GET requests: begin -->
<modules runAllManagedModulesForAllRequests="true">
<remove name="WebDAVModule" />
</modules>
<handlers>
<remove name="WebDAV" />
<add name="aspNetCore" path="*" verb="*" modules="AspNetCoreModuleV2" resourceType="Unspecified" />
</handlers>
<!-- Support RESTful PUT, DELETE, POST, GET requests: end -->

3. Steps for adding an entity in Abp

  1. Add a migration
    add-migration <MigrationName>
  2. Update the database
    update-database
  3. Migrating with multiple database contexts
    Specify which database context to migrate, e.g. xxxDbContext
    add-migration "<MigrationName>" -Context xxxDbContext
    update-database -Context xxxDbContext