Check GPU support
# check GPU support
nvidia-smi
Wed Nov 11 09:28:37 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 418.67       Driver Version: 418.67       CUDA Version: 10.1     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  Tesla T4            Off  | 00000000:21:01.0 Off |                    0 |
| N/A   56C    P0    28W /  70W |      0MiB / 15079MiB |      6%      Default |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                       GPU Memory |
|  GPU       PID   Type   Process name                             Usage      |
|=============================================================================|
|  No running processes found                                                 |
+-----------------------------------------------------------------------------+
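The same information can also be pulled from a script, for example to log the driver and GPU before picking a TensorFlow build. A minimal Python sketch, not part of the original notes; the query fields are standard nvidia-smi options:

# Minimal sketch: read GPU name, driver version, memory and utilization via nvidia-smi.
import subprocess

out = subprocess.check_output(
    ["nvidia-smi",
     "--query-gpu=name,driver_version,memory.total,utilization.gpu",
     "--format=csv,noheader"],
    universal_newlines=True,
)
for line in out.strip().splitlines():
    print(line)  # e.g. "Tesla T4, 418.67, 15079 MiB, 6 %"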
TensorFlow / CUDA version compatibility
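The original notes only reference the compatibility table here without reproducing it. As a rough reminder, a small lookup sketch based on the officially tested build configurations (double-check the TensorFlow docs for your exact version; note that the conda install below ships its own cudatoolkit 9.2 build):

# Rough TensorFlow-GPU -> (CUDA, cuDNN) pairing for the 1.x releases relevant here.
# Values follow the officially tested build configurations; verify before relying on them.
TF_GPU_COMPAT = {
    "1.11.0": {"cuda": "9",  "cudnn": "7"},
    "1.12.0": {"cuda": "9",  "cudnn": "7"},
    "1.13.1": {"cuda": "10", "cudnn": "7.4"},
}

print(TF_GPU_COMPAT["1.11.0"])  # {'cuda': '9', 'cudnn': '7'}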
Install the BERT environment
# install the BERT environment
conda create --name leon python=3.6 pip tensorflow-gpu==1.11.0 numpy pandas
# this will also pull in CUDA 9 (cudatoolkit) and cuDNN 7 as dependencies
cudatoolkit:     9.2-0                      https://mirrors.ustc.edu.cn/anaconda/pkgs/main
cudnn:           7.6.5-cuda9.2_0            https://mirrors.ustc.edu.cn/anaconda/pkgs/main
tensorboard:     1.11.0-py36hf484d3e_0      https://mirrors.ustc.edu.cn/anaconda/pkgs/main
tensorflow:      1.11.0-gpu_py36h9c9050a_0  https://mirrors.ustc.edu.cn/anaconda/pkgs/main
tensorflow-base: 1.11.0-gpu_py36had579c0_0  https://mirrors.ustc.edu.cn/anaconda/pkgs/main
tensorflow-gpu:  1.11.0-h0d30ee6_0          https://mirrors.ustc.edu.cn/anaconda/pkgs/main
# activate the environment
source activate leon
# Note that the server MUST be running on Python >= 3.5 with Tensorflow >= 1.10 (one-point-ten)
pip install bert-serving-server # server
pip install bert-serving-client # client, independent of `bert-serving-server`
# the server does not support Python 2!
pip install keras_preprocessing
pip install keras_applications
pip install h5py==2.8.0
pip install gast
pip install bert-base==0.9.0
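A quick sanity check to run inside the leon environment after the installs above; a sketch using the TF 1.x helpers (tf.VERSION, tf.test.is_gpu_available) and pkg_resources to confirm the GPU build and package versions:

# Sanity check: confirm TensorFlow sees the GPU and the bert packages are installed.
import pkg_resources
import tensorflow as tf

print("TensorFlow:", tf.VERSION)                      # expect 1.11.0
print("Built with CUDA:", tf.test.is_built_with_cuda())
print("GPU available:", tf.test.is_gpu_available())   # should be True on the T4 box

for pkg in ["bert-serving-server", "bert-serving-client", "bert-base"]:
    print(pkg, pkg_resources.get_distribution(pkg).version)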
Start BERT
Pre-trained model download link:
chinese_L-12_H-768_A-12
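The download URL itself did not survive in these notes. A hedged sketch that fetches and unpacks the model into the directory used by the serving command below; the URL is the public google-research/bert release location at the time of writing and may have changed:

# Hedged sketch: download and unpack chinese_L-12_H-768_A-12 under /home/Ner.
import os
import urllib.request
import zipfile

URL = "https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip"
DEST = "/home/Ner"  # parent directory referenced by bert-base-serving-start below

os.makedirs(DEST, exist_ok=True)
zip_path = os.path.join(DEST, "chinese_L-12_H-768_A-12.zip")
urllib.request.urlretrieve(URL, zip_path)

with zipfile.ZipFile(zip_path) as zf:
    zf.extractall(DEST)  # creates /home/Ner/chinese_L-12_H-768_A-12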
# start the BERT server
bert-base-serving-start -bert_model_dir /home/Ner/chinese_L-12_H-768_A-12 -model_pb_dir /home/Ner/model_pb_dir -model_dir /home/Ner/model_pb_dir -mode NER -num_worker=1
bert_model_dir = /home/Ner/chinese_L-12_H-768_A-12
ckpt_name = bert_model.ckpt
config_name = bert_config.json
cors = *
cpu = False
device_map = []
fp16 = False
gpu_memory_fraction = 0.5
http_max_connect = 10
http_port = None
mask_cls_sep = False
max_batch_size = 1024
max_seq_len = 128
mode = NER
model_dir = /home/Ner/model_pb_dir
model_pb_dir = /home/Ner/model_pb_dir
num_worker = 1
pooling_layer = [-2]
pooling_strategy = REDUCE_MEAN
port = 5555
port_out = 5556
prefetch_size = 10
priority_batch_size = 16
tuned_model_dir = None
verbose = False
xla = False
I:VENTILATOR:[__i:__i: 91]:lodding ner model, could take a while...
pb_file exits /home/Ner/model_pb_dir/ner_model.pb
I:VENTILATOR:[__i:__i:100]:optimized graph is stored at: /home/Ner/model_pb_dir/ner_model.pb
I:VENTILATOR:[__i:_ru:148]:bind all sockets
I:VENTILATOR:[__i:_ru:153]:open 8 ventilator-worker sockets, ipc://tmpUp04dl/socket,ipc://tmpG9e8EV/socket,ipc://tmpG51b6v/socket,ipc://tmpMLsgx6/socket,ipc://tmpKBelYG/socket,ipc://tmpExAqph/socket,ipc://tmp0zzwQR/socket,ipc://tmpIeRChs/socket
I:VENTILATOR:[__i:_ru:157]:start the sink
I:SINK:[__i:_ru:317]:ready
I:VENTILATOR:[__i:_ge:239]:get devices
I:VENTILATOR:[__i:_ge:271]:device map:
worker 0 -> gpu 0
I:WORKER-0:[__i:_ru:497]:use device gpu: 0, load graph from /home/Ner/model_pb_dir/ner_model.pb
# use the client to query the server
from bert_serving.client import BertClient
bc = BertClient()
bc.encode(['First do it', 'then do it right', 'then do it better'])
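Since this particular server was started in NER mode on the default ports shown in the config dump (5555 in, 5556 out), the client can also be pointed at it explicitly and given Chinese text. A minimal sketch, assuming server and client run on the same host; in NER mode the response is token-level tags rather than sentence vectors, so inspect the return value for your bert-base version:

# Minimal sketch: query the NER server started above with explicit host/ports.
from bert_serving.client import BertClient

bc = BertClient(ip="localhost", port=5555, port_out=5556)

# In NER mode the server returns token-level labels rather than embeddings;
# the exact shape depends on the bert-base release, so print it and inspect.
result = bc.encode(["小明在北京大学读书"])
print(result)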