# Configure the build environment
root@OCTEONTX:~# apt-get install libpopt0 libpopt-dev libnl-3-200 libnl-3-dev libnl-genl-3-dev libpcap-dev
root@OCTEONTX:~# tar xvf linux-custom.tgz
root@OCTEONTX:~# ln -s `pwd`/linux-custom /lib/modules/`uname -r`/build
# Build DPDK
root@OCTEONTX:~# cd /var/dpvs/
root@OCTEONTX:/var/dpvs# tar xvf dpdk-19.11.0_raw.tar.bz2
root@OCTEONTX:/var/dpvs# cd dpdk-19.11.0
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# export TARGET="arm64-octeontx2-linux-gcc"
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# export RTE_SDK=`pwd`
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# export RTE_TARGET="build"
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# export PATH="${PATH}:$RTE_SDK/usertools"
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# make config T=arm64-octeontx2-linux-gcc
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# sed -i 's/CONFIG_RTE_LIBRTE_PMD_PCAP=n/CONFIG_RTE_LIBRTE_PMD_PCAP=y/g' $RTE_SDK/build/.config
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# make -j
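# If the build succeeds, the artifacts land under the RTE_TARGET directory chosen above.
# A quick sanity check (paths assumed from the RTE_TARGET=build layout used here):
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# ls $RTE_SDK/build/app/testpmd
root@OCTEONTX:/var/dpvs/dpdk-19.11.0# ls $RTE_SDK/build/build/kernel/linux/kni/rte_kni.ko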
# Build DPVS
root@OCTEONTX:~# cd /var/dpvs/
root@OCTEONTX:/var/dpvs# tar xvf dpvs.tar
root@OCTEONTX:/var/dpvs# cd dpvs/
root@OCTEONTX:/var/dpvs/dpvs# patch -p1 < dpvs_5346e4c645c_with_dpdk.patch
root@OCTEONTX:/var/dpvs/dpvs# make -j
root@OCTEONTX:/var/dpvs/dpvs# make install
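# make install places the DPVS binaries under the source tree's bin/ directory,
# which is where the later steps invoke them from. A quick check (expect at
# least dpvs, dpip and ipvsadm):
root@OCTEONTX:/var/dpvs/dpvs# ls /var/dpvs/dpvs/bin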
# Load kernel modules, set up hugepages, and bind the DPDK driver to the target ports
root@OCTEONTX:~# cd /var/dpvs
root@OCTEONTX:/var/dpvs# insmod /var/dpvs/dpdk-19.11.0/build/build/kernel/linux/kni/rte_kni.ko carrier=on
root@OCTEONTX:/var/dpvs# echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
root@OCTEONTX:/var/dpvs# mount -t hugetlbfs nodev /mnt/huge -o pagesize=2M
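# Optional: confirm that the hugepage reservation and the mount took effect:
root@OCTEONTX:/var/dpvs# grep -i hugepages /proc/meminfo
root@OCTEONTX:/var/dpvs# mount | grep /mnt/huge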
root@OCTEONTX:/var/dpvs# dpdk-devbind.py -b vfio-pci 0002:02:00.0
root@OCTEONTX:/var/dpvs# dpdk-devbind.py -b vfio-pci 0002:07:00.0
root@OCTEONTX:/var/dpvs# dpdk-devbind.py -s
Network devices using DPDK-compatible driver
============================================
0002:02:00.0 'Device a063' drv=vfio-pci unused=
0002:07:00.0 'Device a063' drv=vfio-pci unused=
Network devices using kernel driver
===================================
0000:01:10.0 'Device a059' if= drv=octeontx2-cgx unused=vfio-pci
0000:01:10.1 'Device a059' if= drv=octeontx2-cgx unused=vfio-pci
0000:01:10.2 'Device a059' if= drv=octeontx2-cgx unused=vfio-pci
......
root@OCTEONTX:/var/dpvs#
3.2.3 Configure the load-balancing service on the DPU card
root@OCTEONTX:/var/dpvs# ./dpvs/bin/dpvs -- -w 0002:02:00.0 -w 0002:07:00.0
root@OCTEONTX:/var/dpvs#
root@OCTEONTX:/var/dpvs# ./dpvs/bin/dpip link set dpdk0 link up
root@OCTEONTX:/var/dpvs# ./dpvs/bin/dpip link set dpdk1 link up
root@OCTEONTX:/var/dpvs# ./dpvs/bin/dpip addr add 10.0.0.10/32 dev dpdk0 sapool
root@OCTEONTX:/var/dpvs# ./dpvs/bin/dpip addr add 200.0.0.200/32 dev dpdk1
root@OCTEONTX:/var/dpvs# ./dpvs/bin/dpip route add 10.0.0.0/24 dev dpdk0
root@OCTEONTX:/var/dpvs# ./dpvs/bin/dpip route add 200.0.0.0/24 dev dpdk1
root@OCTEONTX:/var/dpvs#
root@OCTEONTX:/var/dpvs# ./dpvs/bin/ipvsadm -A -t 200.0.0.200:80 -s rr
root@OCTEONTX:/var/dpvs# ./dpvs/bin/ipvsadm -a -t 200.0.0.200:80 -r 10.0.0.11 -b
root@OCTEONTX:/var/dpvs# ./dpvs/bin/ipvsadm -a -t 200.0.0.200:80 -r 10.0.0.12 -b
root@OCTEONTX:/var/dpvs# ./dpvs/bin/ipvsadm -a -t 200.0.0.200:80 -r 10.0.0.13 -b
root@OCTEONTX:/var/dpvs# ./dpvs/bin/ipvsadm --add-laddr -z 10.0.0.10 -t 200.0.0.200:80 -F dpdk0
root@OCTEONTX:/var/dpvs#
root@OCTEONTX:/var/dpvs# ./dpvs/bin/ipvsadm -G
VIP:VPORT TOTAL SNAT_IP CONFLICTS CONNS
200.0.0.200:80 1
10.0.0.10 0 0
root@OCTEONTX:/var/dpvs# ./dpvs/bin/ipvsadm -ln
IP Virtual Server version 0.0.0 (size=0)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 200.0.0.200:80 rr
-> 10.0.0.11:80 FullNat 1 0 0
-> 10.0.0.12:80 FullNat 1 0 0
-> 10.0.0.13:80 FullNat 1 0 0
root@OCTEONTX:/var/dpvs#
3.2.4 Configure networking and web services on the three Real Servers
# Real Server 01
[root@node-01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether b8:59:9f:42:36:69 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.11/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::ba59:9fff:fe42:3669/64 scope link
valid_lft forever preferred_lft forever
[root@node-01 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.0.0.10 0.0.0.0 UG 0 0 0 eth0
10.0.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
[root@node-01 ~]# cat index.html
Real Server 01
[root@node-01 ~]# python -m SimpleHTTPServer 80
Serving HTTP on 0.0.0.0 port 80 ...
10.0.0.10 - - [23/Dec/2022 02:57:18] "GET / HTTP/1.1" 200 -
# Real Server 02
[root@node-02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 68:91:d0:64:02:f1 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.12/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::6a91:d0ff:fe64:2f1/64 scope link
valid_lft forever preferred_lft forever
[root@node-02 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.0.0.10 0.0.0.0 UG 0 0 0 eth0
10.0.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
[root@node-02 ~]# python -m SimpleHTTPServer 80
Serving HTTP on 0.0.0.0 port 80 ...
10.0.0.10 - - [23/Dec/2022 08:16:40] "GET / HTTP/1.1" 200 -
# Real Server 03
[root@node-03 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ac state UP group default qlen 1000
link/ether b8:59:9f:c7:73:cb brd ff:ff:ff:ff:ff:ff
inet6 fe80::ba59:9fff:fec7:73cb/64 scope link
valid_lft forever preferred_lft forever
[root@node-03 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.0.0.10 0.0.0.0 UG 0 0 0 eth1
10.0.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth1
[root@node-03 ~]# python -m SimpleHTTPServer 80
Serving HTTP on 0.0.0.0 port 80 ...
10.0.0.10 - - [23/Dec/2022 08:16:39] "GET / HTTP/1.1" 200 -
3.2.5 Configure the Client's network
[root@node-00 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether b8:59:9f:42:36:68 brd ff:ff:ff:ff:ff:ff
inet 200.0.0.48/24 brd 200.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::ba59:9fff:fe42:3668/64 scope link
valid_lft forever preferred_lft forever
[root@node-00 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 200.0.0.1 0.0.0.0 UG 0 0 0 eth0
200.0.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
[root@node-00 ~]#
3.3 Verify the offload results
# On the Client, use curl against http://<VIP> to verify DPVS load balancing
[root@node-00 ~]# curl http://200.0.0.200
Real Server 01
[root@node-00 ~]# curl http://200.0.0.200
Real Server 02
[root@node-00 ~]# curl http://200.0.0.200
Real Server 03
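# Since the scheduler is rr, repeated requests should keep cycling through the
# three Real Servers; a quick loop to confirm:
[root@node-00 ~]# for i in $(seq 1 6); do curl -s http://200.0.0.200; done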
# Make sure you have git-lfs installed (https://git-lfs.com)
git lfs install
git clone https://hf-mirror.com/Qwen/Qwen1.5-0.5B-Chat
# If you want to clone without large files - just their pointers
GIT_LFS_SKIP_SMUDGE=1 git clone https://hf-mirror.com/Qwen/Qwen1.5-0.5B-Chat
[root@server3 LLaMA-Factory-0.8.3]# llamafactory-cli webui
[2024-09-23 17:54:45,786] [INFO] [real_accelerator.py:203:get_accelerator] Setting ds_accelerator to cuda (auto detect)
Running on local URL: http://0.0.0.0:7861
To create a public link, set `share=True` in `launch()`.
[root@server3 AIGC]# cd LLaMA-Factory-0.8.3/asterun/
[root@server3 asterun]# ll
total 4
-rw-r--r-- 1 root root 817 Sep 19 09:33 qwen_full_sft_ds2.yaml
drwxr-xr-x 3 root root 18 Sep 13 10:28 saves
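The qwen_full_sft_ds2.yaml listed above is the recipe for the full-parameter SFT run; besides the webui shown earlier, it could be launched headless (a sketch of the conventional llamafactory-cli invocation, which drives the same entry point):
[root@server3 asterun]# llamafactory-cli train qwen_full_sft_ds2.yaml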
Create a Modelfile for the model
[root@server3 asterun]# touch qwen_full_sft_ds2.ollama.Modelfile
[root@server3 asterun]# vim qwen_full_sft_ds2.ollama.Modelfile
[root@server3 asterun]# cat qwen_full_sft_ds2.ollama.Modelfile
FROM /home/lichao/AIGC/LLaMA-Factory-0.8.3/asterun/saves/qwen/full/sft/qwen-sft-620M-F16.gguf
[root@server3 asterun]# cd ../..
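The .gguf referenced in the Modelfile is not produced by LLaMA-Factory directly; it was presumably converted from the exported HF checkpoint with llama.cpp's converter, roughly along these lines (hypothetical paths):
[root@server3 asterun]# python convert_hf_to_gguf.py saves/qwen/full/sft --outfile saves/qwen/full/sft/qwen-sft-620M-F16.gguf --outtype f16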
Register the model with the Modelfile
[root@server3 AIGC]# ollama create qwen-full-sft -f ./LLaMA-Factory-0.8.3/asterun/qwen_full_sft_ds2.ollama.Modelfile
transferring model data 100%
using existing layer sha256:19d794be57081c1a5aa7e03c4045a0fdc5b8a40f080f0c550ab38033cf0d5d58
creating new layer sha256:c33681b055686143e7d6e0bb0f1054c9910c05c3f4ab16932fbc567a8961929a
writing manifest
success
[root@server3 AIGC]#
Inference test
Run an inference service with the registered model
[root@server3 AIGC]# ollama run qwen-full-sft
>>> who are you?
<|im_end|>
I am {{name}}, an AI assistant developed by {{author}}. I can help users look up information, schedule events, offer suggestions, and more.
>>> can you speak english?
I am an AI assistant developed by {{author}}.
>>> OK, let's talk in Chinese then.
No problem.
>>> Which Chinese city do you like?
Every city has its own unique charm, for example:
Chengdu: a food capital with a relaxed pace of life.
Beijing: long history and rich culture.
Hangzhou: beautiful scenery, famous for the West Lake.
Shanghai: a modern metropolis with a thriving economy.
>>> Thanks, goodbye
Sure. I am an AI assistant, nice to meet you.
>>> exit
[root@server3 AIGC]#
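The stray <|im_end|> in the first reply suggests the one-line Modelfile leaves Ollama without a chat template or stop tokens. A fuller Modelfile sketch using Qwen's ChatML format (an assumption; the FROM path is the same as above):
FROM /home/lichao/AIGC/LLaMA-Factory-0.8.3/asterun/saves/qwen/full/sft/qwen-sft-620M-F16.gguf
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}<|im_start|>user
{{ .Prompt }}<|im_end|>
<|im_start|>assistant
"""
PARAMETER stop <|im_start|>
PARAMETER stop <|im_end|>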
Running the nccl-test job on a single machine with the parameters below triggers the warning "No OpenFabrics connection schemes reported that they were able to be used on a specific port. As such, the openib BTL (OpenFabrics support) will be disabled for this port." The test still runs to completion; it is not yet clear what impact, if any, this warning has.
[root@server3 ~]# /home/lichao/opt/openmpi/bin/mpirun --allow-run-as-root -np 1 /home/lichao/AIGC/nccl-tests/build/all_reduce_perf -b 512 -e 8G -f 2 -g 1
--------------------------------------------------------------------------
No OpenFabrics connection schemes reported that they were able to be
used on a specific port. As such, the openib BTL (OpenFabrics
support) will be disabled for this port.
Local host: server3
Local device: mlx5_0
Local port: 1
CPCs attempted: rdmacm, udcm
--------------------------------------------------------------------------
# nThread 1 nGpus 1 minBytes 512 maxBytes 8589934592 step: 2(factor) warmup iters: 5 iters: 20 agg iters: 1 validation: 1 graph: 0
#
# Using devices
# Rank 0 Group 0 Pid 8080 on server3 device 0 [0x02] NVIDIA GeForce RTX 4060 Ti
#
# Reducing maxBytes to 5261099008 due to memory limitation
#
# out-of-place in-place
# size count type redop root time algbw busbw #wrong time algbw busbw #wrong
# (B) (elements) (us) (GB/s) (GB/s) (us) (GB/s) (GB/s)
512 128 float sum -1 3.77 0.14 0.00 0 0.34 1.50 0.00 0
1024 256 float sum -1 3.96 0.26 0.00 0 0.34 3.04 0.00 0
2048 512 float sum -1 3.63 0.56 0.00 0 0.34 6.03 0.00 0
4096 1024 float sum -1 3.63 1.13 0.00 0 0.34 12.06 0.00 0
8192 2048 float sum -1 3.65 2.25 0.00 0 0.34 24.17 0.00 0
16384 4096 float sum -1 3.63 4.51 0.00 0 0.34 48.23 0.00 0
32768 8192 float sum -1 3.61 9.08 0.00 0 0.34 97.21 0.00 0
65536 16384 float sum -1 3.60 18.18 0.00 0 0.34 193.52 0.00 0
131072 32768 float sum -1 3.67 35.72 0.00 0 0.34 389.86 0.00 0
262144 65536 float sum -1 3.66 71.54 0.00 0 0.35 757.97 0.00 0
524288 131072 float sum -1 4.38 119.60 0.00 0 0.34 1542.25 0.00 0
1048576 262144 float sum -1 6.66 157.41 0.00 0 0.33 3164.08 0.00 0
2097152 524288 float sum -1 15.73 133.29 0.00 0 0.34 6233.18 0.00 0
4194304 1048576 float sum -1 31.38 133.66 0.00 0 0.34 12457.10 0.00 0
8388608 2097152 float sum -1 65.34 128.37 0.00 0 0.34 24467.28 0.00 0
16777216 4194304 float sum -1 132.4 126.70 0.00 0 0.34 49156.80 0.00 0
33554432 8388608 float sum -1 275.5 121.81 0.00 0 0.34 99258.78 0.00 0
67108864 16777216 float sum -1 549.5 122.13 0.00 0 0.34 199728.76 0.00 0
134217728 33554432 float sum -1 1101.8 121.81 0.00 0 0.34 398863.98 0.00 0
268435456 67108864 float sum -1 2203.6 121.81 0.00 0 0.34 785128.56 0.00 0
536870912 134217728 float sum -1 4414.9 121.60 0.00 0 0.34 1567735.18 0.00 0
1073741824 268435456 float sum -1 8819.1 121.75 0.00 0 0.34 3121342.51 0.00 0
2147483648 536870912 float sum -1 17639 121.75 0.00 0 0.35 6218281.88 0.00 0
4294967296 1073741824 float sum -1 35280 121.74 0.00 0 0.30 14144466.64 0.00 0
# Out of bounds values : 0 OK
# Avg bus bandwidth : 0
#
[server3:08076] 1 more process has sent help message help-mpi-btl-openib-cpc-base.txt / no cpcs for port
[server3:08076] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
[root@server3 ~]#
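Since the transport works without the openib BTL anyway, the warning can be silenced by excluding that BTL explicitly (a hedged workaround, not required for correctness):
[root@server3 ~]# /home/lichao/opt/openmpi/bin/mpirun --allow-run-as-root --mca btl ^openib -np 1 /home/lichao/AIGC/nccl-tests/build/all_reduce_perf -b 512 -e 8G -f 2 -g 1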
[root@server1 lichao]# ./run_nccl-test.sh
--------------------------------------------------------------------------
No OpenFabrics connection schemes reported that they were able to be
used on a specific port. As such, the openib BTL (OpenFabrics
support) will be disabled for this port.
Local host: server1
Local device: mlx5_1
Local port: 1
CPCs attempted: rdmacm, udcm
--------------------------------------------------------------------------
[1716789553.453110] [server1:7255 :0] sock.c:325 UCX ERROR connect(fd=54, dest_addr=200.200.0.2:49112) failed: No route to host
[root@server3 ~]# ibdev2netdev
mlx5_0 port 1 ==> ens11f0 (Up)
mlx5_1 port 1 ==> ens11f1 (Up)
[root@server3 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eno1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether ac:1f:6b:dd:1b:f2 brd ff:ff:ff:ff:ff:ff
inet 10.230.1.13/24 brd 10.230.1.255 scope global eno1
valid_lft forever preferred_lft forever
inet6 fe80::ae1f:6bff:fedd:1bf2/64 scope link
valid_lft forever preferred_lft forever
3: eno2: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether ac:1f:6b:dd:1b:f3 brd ff:ff:ff:ff:ff:ff
6: ens11f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether b8:59:9f:3b:57:b6 brd ff:ff:ff:ff:ff:ff
inet 200.200.0.2/30 brd 200.200.0.3 scope global ens11f0
valid_lft forever preferred_lft forever
inet6 fe80::ba59:9fff:fe3b:57b6/64 scope link
valid_lft forever preferred_lft forever
7: ens11f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether b8:59:9f:3b:57:b7 brd ff:ff:ff:ff:ff:ff
inet 172.16.0.13/24 brd 172.16.0.255 scope global ens11f1
valid_lft forever preferred_lft forever
inet6 fe80::ba59:9fff:fe3b:57b7/64 scope link
valid_lft forever preferred_lft forever
[root@server3 ~]#
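ens11f0 carries 200.200.0.2/30, which is not routable between the hosts, so UCX fails when it tries to connect over that port. The NCCL log under Problem 3 below shows NCCL_SOCKET_IFNAME already pinned to ens11f1; the UCX/OpenMPI side can be pinned to the same 172.16.0.0/24 fabric (a sketch; host list and remaining arguments assumed from the earlier runs):
mpirun --allow-run-as-root -np 2 -H server1,server3 \
    -x UCX_NET_DEVICES=mlx5_1:1 -x NCCL_SOCKET_IFNAME=ens11f1 \
    --mca btl_tcp_if_include ens11f1 --mca oob_tcp_if_include ens11f1 \
    /home/lichao/AIGC/nccl-tests/build/all_reduce_perf -b 512 -e 8G -f 2 -g 1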
Problem 3:
The log reports "NET/Plugin: No plugin found (libnccl-net.so)". This is informational: no external network plugin is installed, so NCCL falls back to its internal network plugin, as the log itself confirms.
server1:41185:41185 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to ens11f1
server1:41185:41185 [0] NCCL INFO Bootstrap : Using ens11f1:172.16.0.11<0>
server1:41185:41185 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
server1:41185:41185 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
server1:41185:41185 [0] NCCL INFO NET/Plugin: Using internal network plugin.
server1:41185:41185 [0] NCCL INFO cudaDriverVersion 12040
NCCL version 2.21.5+cuda12.4
[root@server1 dhcp]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011)
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
802.3ad info
LACP rate: slow
Min links: 0
Aggregator selection policy (ad_select): stable
System priority: 65535
System MAC address: 0c:0a:0e:54:00:01
Active Aggregator Info:
Aggregator ID: 3
Number of ports: 2
Actor Key: 9
Partner Key: 0
Partner Mac Address: 52:54:00:12:34:56
Slave Interface: eth1
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 0c:0a:0e:54:00:01
Slave queue ID: 0
Aggregator ID: 3
Actor Churn State: none
Partner Churn State: none
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0c:0a:0e:54:00:01
port key: 9
port priority: 255
port number: 1
port state: 61
details partner lacp pdu:
system priority: 65535
system mac address: 52:54:00:12:34:56
oper key: 0
port priority: 255
port number: 2
port state: 63
Slave Interface: eth2
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 0c:0a:0e:54:00:02
Slave queue ID: 0
Aggregator ID: 3
Actor Churn State: none
Partner Churn State: none
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0c:0a:0e:54:00:01
port key: 9
port priority: 255
port number: 2
port state: 61
details partner lacp pdu:
system priority: 65535
system mac address: 52:54:00:12:34:56
oper key: 0
port priority: 255
port number: 2
port state: 63
[root@server1 dhcp]#
[root@server1 dhcp]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:0a:0e:54:00:00 brd ff:ff:ff:ff:ff:ff
inet 10.240.3.121/24 brd 10.240.3.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::e0a:eff:fe54:0/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master bond0 state UP group default qlen 1000
link/ether 0c:0a:0e:54:00:01 brd ff:ff:ff:ff:ff:ff
4: eth2: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master bond0 state UP group default qlen 1000
link/ether 0c:0a:0e:54:00:01 brd ff:ff:ff:ff:ff:ff
5: eth3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 0c:0a:0e:54:00:03 brd ff:ff:ff:ff:ff:ff
6: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 0c:0a:0e:54:00:01 brd ff:ff:ff:ff:ff:ff
inet 172.16.10.1/24 brd 172.16.10.255 scope global bond0
valid_lft forever preferred_lft forever
inet6 fe80::e0a:eff:fe54:1/64 scope link
valid_lft forever preferred_lft forever
[root@server1 dhcp]# cat dhcpd.conf
#
# DHCP Server Configuration file.
# see /usr/share/doc/dhcp*/dhcpd.conf.example
# see dhcpd.conf(5) man page
#
subnet 172.16.10.0 netmask 255.255.255.0 {
range 172.16.10.100 172.16.10.200;
#option routers 172.16.10.254;
#option domain-name-servers 223.5.5.5;
}
[root@server1 dhcp]#
Complete the MC-LAG configuration on Leaf1 and Leaf2, and confirm its state is normal
leaf1# show mclag state
The MCLAG's keepalive is: OK
MCLAG info sync is: completed
Domain id: 1
MCLAG session Channel: Primary channel
VRF Name: default
consistency Check Action: idle
Local Ip: 12.12.12.1
Peer Ip: 12.12.12.2
Dad Local Ip:
Dad Peer Ip:
Peer Link Interface: lag 99
Keepalive time: 1
Dad Detection Delay: 15
Dad Recovery Delay Mlag Intf: 60
Dad Recovery Delay Non Mlag Intf: 0
Dad VRF Name: default
Dad Status: disable
session Timeout : 15
Peer Link Mac: 52:54:00:12:34:56
Admin Role: None
Role: Active
MCLAG Interface: lag 2,lag 1
Loglevel: NOTICE
leaf1# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Dw) 0/2 (D) N/A
0099 lag 99 LACP(A)(Up) 0/9 (S) N/A
0/8 (S)
leaf1#
leaf2# show mclag state
The MCLAG's keepalive is: OK
MCLAG info sync is: completed
Domain id: 1
MCLAG session Channel: Primary channel
VRF Name: default
consistency Check Action: idle
Local Ip: 12.12.12.2
Peer Ip: 12.12.12.1
Dad Local Ip:
Dad Peer Ip:
Peer Link Interface: lag 99
Keepalive time: 1
Dad Detection Delay: 15
Dad Recovery Delay Mlag Intf: 60
Dad Recovery Delay Non Mlag Intf: 0
Dad VRF Name: default
Dad Status: disable
session Timeout : 15
Peer Link Mac: 52:54:00:12:34:57
Admin Role: None
Role: Standby
MCLAG Interface: lag 2,lag 1
Loglevel: NOTICE
leaf2# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Dw) 0/2 (D) N/A
0099 lag 99 LACP(A)(Up) 0/9 (S) N/A
0/8 (S)
leaf2#
On the two service ports of Centos76-2, no IP address can be obtained via DHCP
[root@server2 ~]# ifup eth1
Determining IP information for eth1... done.
[root@server2 ~]# ifup eth2
Determining IP information for eth2... done.
[root@server2 network-scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:00 brd ff:ff:ff:ff:ff:ff
inet 10.240.3.122/24 brd 10.240.3.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::ea8:80ff:fe2f:0/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:01 brd ff:ff:ff:ff:ff:ff
inet6 fe80::ea8:80ff:fe2f:1/64 scope link
valid_lft forever preferred_lft forever
4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:02 brd ff:ff:ff:ff:ff:ff
inet6 fe80::ea8:80ff:fe2f:2/64 scope link
valid_lft forever preferred_lft forever
5: eth3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:03 brd ff:ff:ff:ff:ff:ff
[root@server2 network-scripts]#
leaf1# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Dw) 0/2 (D) N/A
0099 lag 99 LACP(A)(Up) 0/8 (S) N/A
0/9 (S)
Enable LACP fallback:
leaf1# configure terminal
leaf1(config)# interface link-aggregation 2
leaf1(config-lagif-2)# show this
!
interface link-aggregation 2
lacp fallback
lacp fast-rate
commit
switchport access vlan 512
leaf1(config-lagif-2)# end
leaf1# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Up) 0/2 (S) N/A
0099 lag 99 LACP(A)(Up) 0/9 (S) N/A
0/8 (S)
leaf1#
leaf2# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Dw) 0/2 (D) N/A
0099 lag 99 LACP(A)(Up) 0/8 (S) N/A
0/9 (S)
Enable LACP fallback:
leaf2# configure terminal
leaf2(config)# interface link-aggregation 2
leaf2(config-lagif-2)# show this
!
interface link-aggregation 2
lacp fallback
lacp fast-rate
commit
switchport access vlan 512
leaf2(config-lagif-2)# end
leaf2# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Dw) 0/2 (D) N/A
0099 lag 99 LACP(A)(Up) 0/8 (S) N/A
0/9 (S)
leaf2#
After fallback is enabled, one of the two service ports on Centos76-2 can obtain an IP address via DHCP: per the summaries above, only leaf1's lag 2 comes up in fallback (S) while leaf2's stays down (D), so eth1 gets a lease and eth2 does not
[root@server2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:00 brd ff:ff:ff:ff:ff:ff
inet 10.240.3.122/24 brd 10.240.3.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::ea8:80ff:fe2f:0/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:01 brd ff:ff:ff:ff:ff:ff
4: eth2: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:02 brd ff:ff:ff:ff:ff:ff
5: eth3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:03 brd ff:ff:ff:ff:ff:ff
[root@server2 ~]# ifup eth1
Determining IP information for eth1... done.
[root@server2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:00 brd ff:ff:ff:ff:ff:ff
inet 10.240.3.122/24 brd 10.240.3.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::ea8:80ff:fe2f:0/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:01 brd ff:ff:ff:ff:ff:ff
inet 172.16.10.100/24 brd 172.16.10.255 scope global dynamic eth1
valid_lft 43197sec preferred_lft 43197sec
inet6 fe80::ea8:80ff:fe2f:1/64 scope link
valid_lft forever preferred_lft forever
4: eth2: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:02 brd ff:ff:ff:ff:ff:ff
5: eth3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:03 brd ff:ff:ff:ff:ff:ff
[root@server2 ~]# ifup eth2
Determining IP information for eth2... done.
[root@server2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:00 brd ff:ff:ff:ff:ff:ff
inet 10.240.3.122/24 brd 10.240.3.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::ea8:80ff:fe2f:0/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:01 brd ff:ff:ff:ff:ff:ff
inet 172.16.10.100/24 brd 172.16.10.255 scope global dynamic eth1
valid_lft 42370sec preferred_lft 42370sec
inet6 fe80::ea8:80ff:fe2f:1/64 scope link
valid_lft forever preferred_lft forever
4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:02 brd ff:ff:ff:ff:ff:ff
inet6 fe80::ea8:80ff:fe2f:2/64 scope link
valid_lft forever preferred_lft forever
5: eth3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:03 brd ff:ff:ff:ff:ff:ff
[root@server2 ~]#
The lease information can be seen on the DHCP Server
[root@server1 dhcp]# cat /var/lib/dhcpd/dhcpd.leases
# The format of this file is documented in the dhcpd.leases(5) manual page.
# This lease file was written by isc-dhcp-4.2.5
server-duid "\000\001\000\001.,\333g\014\012\016T\000\001";
lease 172.16.10.100 {
starts 5 2024/07/19 08:08:19;
ends 5 2024/07/19 20:08:19;
cltt 5 2024/07/19 08:08:19;
binding state active;
next binding state free;
rewind binding state free;
hardware ethernet 0c:a8:80:2f:00:01;
client-hostname "server2";
}
[root@server1 dhcp]# systemctl status dhcpd
● dhcpd.service - DHCPv4 Server Daemon
Loaded: loaded (/usr/lib/systemd/system/dhcpd.service; disabled; vendor preset: disabled)
Active: active (running) since Fri 2024-07-19 08:11:09 UTC; 1h 13min ago
Docs: man:dhcpd(8)
man:dhcpd.conf(5)
Main PID: 4036 (dhcpd)
Status: "Dispatching packets..."
CGroup: /system.slice/dhcpd.service
└─4036 /usr/sbin/dhcpd -f -cf /etc/dhcp/dhcpd.conf -user dhcpd -group dhcpd --no-pid
Jul 19 08:11:09 server1 dhcpd[4036]:
Jul 19 08:11:09 server1 dhcpd[4036]: No subnet declaration for eth0 (10.240.3.121).
Jul 19 08:11:09 server1 dhcpd[4036]: ** Ignoring requests on eth0. If this is not what
Jul 19 08:11:09 server1 dhcpd[4036]: you want, please write a subnet declaration
Jul 19 08:11:09 server1 dhcpd[4036]: in your dhcpd.conf file for the network segment
Jul 19 08:11:09 server1 dhcpd[4036]: to which interface eth0 is attached. **
Jul 19 08:11:09 server1 dhcpd[4036]:
Jul 19 08:11:09 server1 dhcpd[4036]: Sending on Socket/fallback/fallback-net
Jul 19 08:11:58 server1 dhcpd[4036]: DHCPREQUEST for 172.16.10.100 from 0c:a8:80:2f:00:01 (server2) via bond0
Jul 19 08:11:58 server1 dhcpd[4036]: DHCPACK on 172.16.10.100 to 0c:a8:80:2f:00:01 (server2) via bond0
[root@server1 dhcp]#
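The two service ports on server2 were then rebuilt as an LACP bond. A sketch of the ifcfg files this assumes (standard CentOS 7 network-scripts; eth2 mirrors eth1, and the bond address is shown static to match the ip a output below):
# /etc/sysconfig/network-scripts/ifcfg-bond0
DEVICE=bond0
TYPE=Bond
BONDING_MASTER=yes
BONDING_OPTS="mode=802.3ad miimon=100"
BOOTPROTO=none
IPADDR=172.16.10.101
PREFIX=24
ONBOOT=yes
# /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
MASTER=bond0
SLAVE=yes
BOOTPROTO=none
ONBOOT=yes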
[root@server2 network-scripts]# ifup bond0
[root@server2 network-scripts]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011)
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
802.3ad info
LACP rate: slow
Min links: 0
Aggregator selection policy (ad_select): stable
System priority: 65535
System MAC address: 0c:a8:80:2f:00:01
Active Aggregator Info:
Aggregator ID: 2
Number of ports: 2
Actor Key: 9
Partner Key: 0
Partner Mac Address: 52:54:00:12:34:56
Slave Interface: eth2
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 0c:a8:80:2f:00:02
Slave queue ID: 0
Aggregator ID: 2
Actor Churn State: none
Partner Churn State: none
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0c:a8:80:2f:00:01
port key: 9
port priority: 255
port number: 2
port state: 61
details partner lacp pdu:
system priority: 65535
system mac address: 52:54:00:12:34:56
oper key: 0
port priority: 255
port number: 3
port state: 63
Slave Interface: eth1
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 0c:a8:80:2f:00:01
Slave queue ID: 0
Aggregator ID: 2
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0c:a8:80:2f:00:01
port key: 9
port priority: 255
port number: 3
port state: 61
details partner lacp pdu:
system priority: 65535
system mac address: 52:54:00:12:34:56
oper key: 0
port priority: 255
port number: 3
port state: 63
[root@server2 network-scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:00 brd ff:ff:ff:ff:ff:ff
inet 10.240.3.122/24 brd 10.240.3.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::ea8:80ff:fe2f:0/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master bond0 state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:01 brd ff:ff:ff:ff:ff:ff
4: eth2: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master bond0 state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:01 brd ff:ff:ff:ff:ff:ff
5: eth3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 0c:a8:80:2f:00:03 brd ff:ff:ff:ff:ff:ff
6: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 0c:a8:80:2f:00:01 brd ff:ff:ff:ff:ff:ff
inet 172.16.10.101/24 brd 172.16.10.255 scope global bond0
valid_lft forever preferred_lft forever
inet6 fe80::ea8:80ff:fe2f:1/64 scope link
valid_lft forever preferred_lft forever
[root@server2 network-scripts]# ping 172.16.10.1 -c 4
PING 172.16.10.1 (172.16.10.1) 56(84) bytes of data.
64 bytes from 172.16.10.1: icmp_seq=1 ttl=64 time=5.38 ms
64 bytes from 172.16.10.1: icmp_seq=2 ttl=64 time=3.29 ms
64 bytes from 172.16.10.1: icmp_seq=3 ttl=64 time=3.97 ms
64 bytes from 172.16.10.1: icmp_seq=4 ttl=64 time=3.11 ms
--- 172.16.10.1 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3005ms
rtt min/avg/max/mdev = 3.115/3.943/5.389/0.895 ms
[root@server2 network-scripts]#
leaf1# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Up) 0/2 (S) N/A
0099 lag 99 LACP(A)(Up) 0/9 (S) N/A
0/8 (S)
leaf1#
leaf2# show link-aggregation summary
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports Description
----- --------------- ----------- ------------- -------------
0001 lag 1 LACP(A)(Up) 0/1 (S) N/A
0002 lag 2 LACP(A)(Up) 0/2 (S) N/A
0099 lag 99 LACP(A)(Up) 0/8 (S) N/A
0/9 (S)
leaf2#
[root@master1 ~]# kubeadm config images list
I0608 09:54:34.987170 2894 version.go:254] remote version is much newer: v1.24.1; falling back to: stable-1.21
registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.13
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.13
registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.13
registry.aliyuncs.com/google_containers/kube-proxy:v1.21.13
registry.aliyuncs.com/google_containers/pause:3.4.1
registry.aliyuncs.com/google_containers/etcd:3.4.13-0
registry.aliyuncs.com/google_containers/coredns/coredns:v1.8.0
[root@master1 ~]#
Generate the script.
kubeadm config images list >> image.list
Edit the script.
vi image.list
#!/bin/bash
img_list='registry.aliyuncs.com/google_containers/kube-apiserver:v1.21.3
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.21.3
registry.aliyuncs.com/google_containers/kube-scheduler:v1.21.3
registry.aliyuncs.com/google_containers/kube-proxy:v1.21.3
registry.aliyuncs.com/google_containers/pause:3.4.1
registry.aliyuncs.com/google_containers/etcd:3.4.13-0
registry.aliyuncs.com/google_containers/coredns/coredns:v1.8.0'
for img in ${img_list}
do
docker pull $img
done
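The WRF library builds in 4.2.3-4.2.7 below install into $DIR/grib2, $DIR/mpich and $DIR/netcdf; they assume an environment along these lines, exported beforehand (a sketch following the usual WRF build guide; the DIR path is an assumption):
export DIR=/home/Build_WRF/LIBRARIES
export CC=gcc CXX=g++ FC=gfortran F77=gfortran
export JASPERLIB=$DIR/grib2/lib
export JASPERINC=$DIR/grib2/include
export LDFLAGS=-L$DIR/grib2/lib
export CPPFLAGS=-I$DIR/grib2/include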
4.2.3 Build and install zlib
[root@Server1 Build_WRF]# tar xzvf zlib-1.2.7.tar.gz
[root@Server1 Build_WRF]# cd zlib-1.2.7
[root@Server1 zlib-1.2.7]# ./configure --prefix=$DIR/grib2
[root@Server1 zlib-1.2.7]# make
[root@Server1 zlib-1.2.7]# make install
4.2.4 Build and install libpng
[root@Server1 Build_WRF]# tar xzvf libpng-1.2.50.tar.gz
[root@Server1 Build_WRF]# cd libpng-1.2.50
[root@Server1 libpng-1.2.50]# ./configure --prefix=$DIR/grib2
[root@Server1 libpng-1.2.50]# make
[root@Server1 libpng-1.2.50]# make install
4.2.5 Build and install mpich
[root@Server1 Build_WRF]# tar xzvf mpich-3.0.4.tar.gz
[root@Server1 Build_WRF]# cd mpich-3.0.4
[root@Server1 mpich-3.0.4]# ./configure --prefix=$DIR/mpich
[root@Server1 mpich-3.0.4]# make
[root@Server1 mpich-3.0.4]# make install
4.2.6 Build and install jasper
[root@Server1 Build_WRF]# tar xzvf jasper-1.900.1.tar.gz
[root@Server1 Build_WRF]# cd jasper-1.900.1
[root@Server1 jasper-1.900.1]# ./configure --prefix=$DIR/grib2
[root@Server1 jasper-1.900.1]# make
[root@Server1 jasper-1.900.1]# make install
4.2.7 Build and install netcdf
[root@Server1 Build_WRF]# tar xzvf netcdf-4.1.3.tar.gz
[root@Server1 Build_WRF]# cd netcdf-4.1.3
[root@Server1 netcdf-4.1.3]# ./configure --prefix=$DIR/netcdf \
--disable-dap --disable-netcdf-4 --disable-shared
[root@Server1 netcdf-4.1.3]# make
[root@Server1 netcdf-4.1.3]# make install
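A quick sanity check that everything landed under the expected prefixes (paths follow the --prefix values above):
[root@Server1 netcdf-4.1.3]# ls $DIR/grib2/lib $DIR/mpich/bin $DIR/netcdf/lib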