### **Log in**
sudo -i   # enter the root password when prompted
### **Enter the working directory**
cd /root/AI
### **List the directory contents**
ls -l
### **Make the install script executable**
chmod +x xxx.sh
### **Install**
./xxx.sh
### **Or**
bash xxx.sh
### **Apply the environment variables**
source ~/.bashrc
### **Install the conda packages**
```
cd conda-packages/
conda install --use-local *
```
### **Install the pip packages**
```
cd /root/AI/pip-packages
pip install --no-index --find-links=. *
```
### **Check the Python version**
python --version
### **System libraries that may be needed**
```
sudo apt-get install -y libsm6
sudo apt-get install -y libxrender1
sudo apt-get install -y libxext-dev
```
### **Verify**
python
```
import tensorflow as tf
print(tf.constant('xxx').numpy().decode())
import cv2
import numpy as np
print(cv2.resize(np.zeros((6,6)),(3,3)))
```
### **jupyter notebook**
```
ifconfig
jupyter notebook --ip xxx.xxx.xxx.xx --allow-root
```
*****
### **Imports**
from tensorflow import keras
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint
from tensorflow.keras.layers import Input,Dense
from tensorflow.keras.models import Model
from PIL import Image #load the Image class from PIL (Pillow)
import glob
import cv2
import numpy as np
import random
import os
import sys
### **Image and split configuration**
```
width=32
height=32
channel=3
train_ratio=0.8 #0.8<=train_ratio<=0.9
crop_fix_size=(30,30)
crop_ratio=0.5 #0.5<=crop_ratio<=0.8
```
### **Image processing**
#### **Random crop**
x=random.randint(0,img.shape[0]-crop_fix_size[0]-1)
y=random.randint(0,img.shape[1]-crop_fix_size[1]-1)
img=img[x:x+crop_fix_size[0],y:y+crop_fix_size[1],:]
#### **Resize**
cv2.resize(img,dsize=size,interpolation=cv2.INTER_AREA)
#### **Flip and grayscale**
cv2.flip(img1,1,dst=None) #horizontal flip
cv2.flip(img1,0,dst=None) #vertical flip
cv2.flip(img1,-1,dst=None) #flip both axes (180° rotation)
cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #convert to grayscale
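The crop, resize, and flip operations above are usually combined into a single augmentation helper applied to each training image. A minimal sketch, assuming `crop_ratio` is the probability of applying the random crop, that the input image is larger than `crop_fix_size`, and that `crop_fix_size` and `size` come from the configuration above (the function name `random_augment` is hypothetical, not part of the original code):
```
import random
import cv2

def random_augment(img, crop_fix_size=(30, 30), crop_ratio=0.5, size=(32, 32)):
    # randomly crop a crop_fix_size patch with probability crop_ratio (assumption)
    if random.random() < crop_ratio:
        x = random.randint(0, img.shape[0] - crop_fix_size[0] - 1)
        y = random.randint(0, img.shape[1] - crop_fix_size[1] - 1)
        img = img[x:x + crop_fix_size[0], y:y + crop_fix_size[1], :]
    # random horizontal flip
    if random.random() < 0.5:
        img = cv2.flip(img, 1)
    # resize back to the network input size
    return cv2.resize(img, dsize=size, interpolation=cv2.INTER_AREA)
```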
#### **Filter out undersized files**
import os
path=r'./path/'+filename+'.jpg'
s=round(os.path.getsize(path)/float(1024),2) #file size in KB
if s<10.0:
    os.remove(path) #delete the file if it is smaller than 10 KB
### **Dataset processing**
#### **Training configuration**
lr=0.03 #0.03<=lr<=0.1
batch=10 #1<=batch<=10
epoch=40 #10<=epoch<=40
patienceEpoch=5
size=width,height
#### **Train/test split**
files,labels,clazz=CountFiles(r"flower_photos")
c=list(zip(files,labels))
random.shuffle(c)
files,labels=zip(*c)
labels=np.array(labels)
labels=keras.utils.to_categorical(labels,clazz)
train_num=int(train_ratio*len(files))
train_x,train_y=files[:train_num],labels[:train_num]
test_x,test_y=files[train_num:],labels[train_num:]
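`CountFiles` is used above but not defined anywhere in these notes. A minimal sketch of what such a helper could look like, assuming a directory layout of `flower_photos/<class_name>/*.jpg`; the name and the return order (file paths, integer labels, class count) follow the call above, while the implementation itself is an assumption:
```
import glob
import os

def CountFiles(root):
    # collect image paths and integer class labels from <root>/<class_name>/*.jpg
    files, labels = [], []
    class_names = sorted(d for d in os.listdir(root)
                         if os.path.isdir(os.path.join(root, d)))
    for idx, name in enumerate(class_names):
        for path in glob.glob(os.path.join(root, name, "*.jpg")):
            files.append(path)
            labels.append(idx)
    # returns: file paths, integer labels, number of classes
    return files, labels, len(class_names)
```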
### **Model configuration**
model_vgg16_conv=VGG16(weights=None,include_top=False,pooling='avg')
input=Input(shape=(width,height,channel),name='image_input')
output_vgg16_conv=model_vgg16_conv(input)
x=output_vgg16_conv
x=Dense(clazz,activation='softmax',name='predictions')(x)
model=Model(inputs=input,outputs=x)
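To confirm that the VGG16 backbone and the new softmax head are wired as intended, the architecture can be printed before compiling:
```
model.summary() #prints layer output shapes and parameter counts
```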
### **Model compilation and training**
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(learning_rate=lr),
              metrics=['acc'])
steps_per_epoch=int(len(train_x)/batch)
validation_steps=int(len(test_x)/batch)
model.fit(
    LoadImageGen(train_x,train_y,batch=batch),
    steps_per_epoch=steps_per_epoch,
    epochs=epoch,
    verbose=1,
    validation_data=LoadImageGen(test_x,test_y,batch=batch),
    validation_steps=validation_steps,
    callbacks=[EarlyStopping(monitor='val_acc',patience=patienceEpoch),
               tensorBoardCallBack,
               modelCheckpoint],
)
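`LoadImageGen`, `tensorBoardCallBack`, and `modelCheckpoint` are referenced above but never defined in these notes. A minimal sketch under the following assumptions: `LoadImageGen` is an endless generator yielding `(images, labels)` batches, reading each file with OpenCV, resizing it to the network input size, and scaling pixels to [0, 1]; the two callbacks are ordinary `TensorBoard` and `ModelCheckpoint` instances (the log and checkpoint paths are illustrative). These definitions would have to appear before the `model.fit` call.
```
import cv2
import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard

def LoadImageGen(files, labels, batch=10, size=(32, 32)):
    # endless generator yielding (images, labels) batches for model.fit
    while True:
        for start in range(0, len(files) - batch + 1, batch):
            imgs = []
            for path in files[start:start + batch]:
                img = cv2.imread(path)  # BGR, uint8
                img = cv2.resize(img, dsize=size, interpolation=cv2.INTER_AREA)
                imgs.append(img.astype("float32") / 255.0)  # scale to [0, 1]
            yield np.array(imgs), labels[start:start + batch]

# illustrative callback instances; the log/checkpoint paths are assumptions
tensorBoardCallBack = TensorBoard(log_dir="./logs")
modelCheckpoint = ModelCheckpoint("best_model.h5",
                                  monitor="val_acc",
                                  save_best_only=True)
```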