68点的人脸关键点检测:

import paddle
import paddle.nn as nn
# GoogLeNet-style Inception block.
# NOTE(review): the original comment mentioned BatchNorm speeding up
# convergence, but no BN layers are actually present in this block.
class Inception(nn.Layer):
    """Inception v1 block: four parallel branches concatenated on channels.

    Args:
        c1: (in_channels, branch1_out); c1[0] is reused as the input
            channel count for every branch.
        c2: (reduce_out, branch2_out) for the 1x1 -> 3x3 branch.
        c3: (reduce_out, branch3_out) for the 1x1 -> 5x5 branch.
        c4: output channels of the max-pool -> 1x1 branch.
    """

    def __init__(self, c1, c2, c3, c4):
        super(Inception, self).__init__()
        self.relu = nn.ReLU()
        # Branch 1: single 1x1 convolution.
        self.p1_1 = nn.Conv2D(c1[0], c1[1], 1)
        # Branch 2: 1x1 reduction, then 3x3 (padding keeps spatial size).
        self.p2_1 = nn.Conv2D(c1[0], c2[0], 1)
        self.p2_2 = nn.Conv2D(c2[0], c2[1], 3, padding=1)
        # Branch 3: 1x1 reduction, then 5x5.
        self.p3_1 = nn.Conv2D(c1[0], c3[0], 1)
        self.p3_2 = nn.Conv2D(c3[0], c3[1], 5, padding=2)
        # Branch 4: 3x3 max-pool (stride 1 keeps size), then 1x1.
        self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2D(c1[0], c4, 1)

    def forward(self, x):
        # NOTE(review): ReLU is applied only once per branch, after the
        # final conv — the 1x1 reducers have no activation, exactly as in
        # the original code.
        branch1 = self.relu(self.p1_1(x))
        branch2 = self.relu(self.p2_2(self.p2_1(x)))
        branch3 = self.relu(self.p3_2(self.p3_1(x)))
        branch4 = self.relu(self.p4_2(self.p4_1(x)))
        return paddle.concat([branch1, branch2, branch3, branch4], axis=1)
import paddle
import paddle.nn as nn
# 空间注意力机制
class SAM_Module(nn.Layer):
    """Spatial Attention Module.

    Builds a per-pixel attention map from the channel-wise average and
    maximum of the input, then rescales the input by that map.
    """

    def __init__(self):
        super(SAM_Module, self).__init__()
        # 7x7 conv fuses the 2-channel [avg, max] descriptor into a
        # single-channel attention map (padding=3 preserves spatial size).
        self.conv_after_concat = nn.Conv2D(in_channels=2, out_channels=1,
                                           kernel_size=7, stride=1, padding=3)
        self.sigmoid_spatial = nn.Sigmoid()

    def forward(self, x):
        # Spatial Attention Module
        module_input = x
        avg = paddle.mean(x, axis=1, keepdim=True)
        # BUG FIX: the original used paddle.argmax + cast, which yields
        # channel *indices* as floats, not activation values. CBAM-style
        # spatial attention uses the channel-wise maximum value.
        mx = paddle.max(x, axis=1, keepdim=True)
        x = paddle.concat([avg, mx], axis=1)
        x = self.conv_after_concat(x)
        x = self.sigmoid_spatial(x)
        # Gate the original input with the attention map.
        x = module_input * x
        return x
import paddle
import paddle.nn as nn
class SpatialGatingUnit(nn.Layer):
    """gMLP spatial gating unit.

    Splits the feature dimension in half; one half is normalized,
    projected with a 1x1 Conv1D, optionally activated, and used to gate
    the other half element-wise.
    """

    def __init__(self, dim, dim_seq, act=None):
        super(SpatialGatingUnit, self).__init__()
        half = dim // 2
        self.norm = nn.LayerNorm(normalized_shape=half)
        self.proj = nn.Conv1D(in_channels=dim_seq, out_channels=dim_seq,
                              kernel_size=1)
        self.act = act  # optional extra activation on the gate

    def forward(self, x):
        # Split the last axis into a residual half and a gating half.
        residual, gate = paddle.chunk(x, 2, axis=-1)
        gate = self.norm(gate)
        # Conv1D expects (batch, channels, length): swap last two axes.
        gate = self.proj(gate.transpose((0, 2, 1)))
        if self.act:
            gate = self.act(gate)
        out = gate * residual.transpose((0, 2, 1))
        # Swap back to the (batch, length, channels) layout.
        out = out.transpose((0, 2, 1))
        return out
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
def convert_to_list(value, n, name, dtype=np.int):sgu = SpatialGatingUnit(32, 16)
paddle.summary(sgu, (3, 32, 32))---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===========================================================================
LayerNorm-1 [[3, 32, 16]] [3, 32, 16] 32
Conv1D-1 [[3, 16, 32]] [3, 16, 32] 272
===========================================================================
Total params: 304
Trainable params: 304
Non-trainable params: 0
---------------------------------------------------------------------------
Input size (MB): 0.01
Forward/backward pass size (MB): 0.02
Params size (MB): 0.00
Estimated Total Size (MB): 0.04
---------------------------------------------------------------------------
{'total_params': 304, 'trainable_params': 304}import paddle
import paddle.nn as nn
class gMLPBlock(nn.Layer):
    """One gMLP block: channel expansion, spatial gating, projection back.

    The SpatialGatingUnit halves the feature dimension, hence the
    ``dim_ff // 2`` input size of the output projection.
    """

    def __init__(self, dim, dim_ff, seq_len, act=None):
        super(gMLPBlock, self).__init__()
        # Expand channels and apply GELU before gating.
        self.proj_in = nn.Sequential(
            nn.Linear(dim, dim_ff),
            nn.GELU(),
        )
        self.sgu = SpatialGatingUnit(dim_ff, seq_len, act)
        self.proj_out = nn.Linear(dim_ff // 2, dim)

    def forward(self, x):
        hidden = self.proj_in(x)
        hidden = self.sgu(hidden)
        return self.proj_out(hidden)
paddle.summary(gMLP_block, (3, 32, 32))-------------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===============================================================================
Linear-4 [[3, 32, 32]] [3, 32, 32] 1,056
GELU-1 [[3, 32, 32]] [3, 32, 32] 0
LayerNorm-2 [[3, 32, 16]] [3, 32, 16] 32
Conv1D-2 [[3, 16, 32]] [3, 16, 32] 272
SpatialGatingUnit-2 [[3, 32, 32]] [3, 32, 16] 0
Linear-5 [[3, 32, 16]] [3, 32, 32] 544
===============================================================================
Total params: 1,904
Trainable params: 1,904
Non-trainable params: 0
-------------------------------------------------------------------------------
Input size (MB): 0.01
Forward/backward pass size (MB): 0.11
Params size (MB): 0.01
Estimated Total Size (MB): 0.12
-------------------------------------------------------------------------------
{'total_params': 1904, 'trainable_params': 1904}import paddle
import paddle.nn as nn
def pair(val):
    """Return *val* unchanged if it is a tuple, otherwise ``(val, val)``."""
    if isinstance(val, tuple):
        return val
    return (val, val)
class PatchEmbed(nn.Layer):
    """Image-to-patch embedding.

    Splits an image into non-overlapping patches with a convolution whose
    kernel size and stride both equal the patch size, and projects each
    patch to ``embed_dim``. Output shape: (batch, num_patches, embed_dim).
    """

    def __init__(self, img_size=32, patch_size=16, in_chans=3, embed_dim=768):
        super(PatchEmbed, self).__init__()
        self.img_size = (img_size, img_size)
        self.patch_size = (patch_size, patch_size)
        # Patches per row times patches per column.
        self.num_patches = ((self.img_size[1] // self.patch_size[1])
                            * (self.img_size[0] // self.patch_size[0]))
        self.project = nn.Conv2D(in_chans, embed_dim,
                                 kernel_size=self.patch_size,
                                 stride=self.patch_size)

    def forward(self, x):
        patches = self.project(x)      # (B, embed_dim, H', W')
        patches = patches.flatten(2)   # (B, embed_dim, H'*W')
        # Move the patch axis before the channel axis.
        return paddle.transpose(patches, perm=[0, 2, 1])
class Residual(nn.Layer):
    """Wraps *fn* with an identity skip connection: ``y = fn(x) + x``."""

    def __init__(self, fn):
        super(Residual, self).__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x) + x
class PreNorm(nn.Layer):
    """Applies LayerNorm to the input before delegating to *fn*."""

    def __init__(self, dim, fn):
        super(PreNorm, self).__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, **kwargs):
        normalized = self.norm(x)
        return self.fn(normalized, **kwargs)
class gMLPVision(nn.Layer):
    """gMLP image classifier.

    Pipeline: patch embedding -> linear projection to ``dim`` ->
    ``depth`` residual pre-norm gMLP blocks -> LayerNorm -> mean pooling
    over patches -> linear classification head.

    Args:
        image_size: input image size (int or (h, w) tuple).
        patch_size: patch size (int or (h, w) tuple); must divide the image.
        num_classes: number of output classes.
        dim: model (token) dimension.
        depth: number of gMLP blocks.
        ff_mult: expansion factor for the feed-forward dimension.
        channels: number of input image channels.
    """

    def __init__(
        self,
        image_size,
        patch_size,
        num_classes,
        dim,
        depth,
        ff_mult=4,
        channels=3,
    ):
        super(gMLPVision, self).__init__()
        self.image_height, self.image_width = pair(image_size)
        self.patch_height, self.patch_width = pair(patch_size)
        assert (self.image_height % self.patch_height) == 0 and (self.image_width % self.patch_width) == 0, 'image height and width must be divisible by patch size'
        num_patches = (self.image_height // self.patch_height) * (self.image_width // self.patch_width)
        dim_ff = dim * ff_mult
        patch_dim = channels * self.patch_height * self.patch_width
        self.to_patch_embed = nn.Sequential(
            # 'b c (h p1) (w p2) -> b (h w) (c p1 p2)'
            # BUG FIX: in_chans was hard-coded to 3 even though a
            # `channels` parameter exists; pass it through so non-RGB
            # inputs work. Backward compatible (channels defaults to 3).
            PatchEmbed(img_size=image_size, patch_size=patch_size,
                       in_chans=channels, embed_dim=patch_dim),
            nn.Linear(patch_dim, dim)
        )
        self.layers = nn.LayerList([Residual(PreNorm(dim, gMLPBlock(dim = dim, dim_ff = dim_ff, seq_len = num_patches))) for i in range(depth)])
        # NOTE(review): attribute name 'norn' (sic, likely meant 'norm')
        # is kept so existing checkpoints' state_dict keys still match.
        self.norn = nn.LayerNorm(dim)
        self.to_logits = nn.Sequential(
            nn.Linear(dim, num_classes)
        )

    def forward(self, x):
        x = self.to_patch_embed(x)
        # Iterate the LayerList directly instead of re-wrapping it in a
        # fresh nn.Sequential on every forward pass (same computation,
        # no per-call container construction).
        for layer in self.layers:
            x = layer(x)
        x = self.norn(x)
        # Mean-pool over the patch axis before classification.
        x = x.mean(axis=1)
        return self.to_logits(x)


gmlp_vision = gMLPVision(
    image_size=32,
    patch_size=2,
    num_classes=10,
    dim=128,
    depth=30
)
paddle.summary(gmlp_vision, (32, 3, 32, 32))===================================================================
Total params: 4,968,486
Trainable params: 4,968,486
Non-trainable params: 0
-------------------------------------------------------------------
Input size (MB): 0.38
Forward/backward pass size (MB): 4577.50
Params size (MB): 18.95
Estimated Total Size (MB): 4596.83
-------------------------------------------------------------------
{'total_params': 4968486, 'trainable_params': 4968486}!pip install paddlex -i https://mirror.baidu.com/pypi/simpleimport paddlex as pdx
# Instantiate a PaddleX YOLOv3 detector with a DarkNet53 backbone.
yolo_v3 = pdx.det.YOLOv3(
    num_classes=2,        # two foreground classes
    backbone='DarkNet53'
)
yolo_v3.get_model_info(){'version': '1.3.11',
'Model': 'YOLOv3',
'_Attributes': {'model_type': 'detector',
'num_classes': 2,
'labels': None,
'fixed_input_shape': None},
'_init_params': {'num_classes': 2,
'backbone': 'DarkNet53',
'anchors': None,
'anchor_masks': None,
'ignore_threshold': 0.7,
'nms_score_threshold': 0.01,
'nms_topk': 1000,
'nms_keep_topk': 100,
'nms_iou_threshold': 0.45,
'label_smooth': False,
'train_random_shapes': [320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
'input_channel': 3},
'completed_epochs': 0}!git clone https://gitee.com/PaddlePaddle/PaddleXfatal: destination path 'PaddleX' already exists and is not an empty directory.%cd PaddleX/examples/human_segmentation/home/aistudio/PaddleX/examples/human_segmentation!python pretrain_weights/download_pretrain_weights.py!python video_infer.py --model_dir pretrain_weights/humanseg_mobile_inference --video_path data/test.mp4
!hub install chinese_ocr_db_crnn_mobile==1.1.2[2021-07-30 19:38:41,290] [ INFO] - Successfully installed chinese_ocr_db_crnn_mobile-1.1.1
2021-07-30 19:38:41,300 - INFO - Lock 140042593361488 released on /home/aistudio/.paddlehub/tmp/chinese_ocr_db_crnn_mobile!pip install shapely
!pip install pyclipperimport paddlehub as hub
import cv2

# Load the mobile Chinese OCR model (DB detector + CRNN recognizer).
ocr = hub.Module(name="chinese_ocr_db_crnn_mobile")
# Run detection + recognition on one image and save a visualization of
# the result to output_dir.
result = ocr.recognize_text(
    images=[cv2.imread('/home/aistudio/work/OCR/ocrdemo.png')],
    output_dir='/home/aistudio/work/OCR/ocr_result',
    visualization=True)
import paddle

# Wrap the network with paddle.Model to use the high-level training API.
# NOTE(review): `Net` looks like a class here; paddle.Model normally
# expects an *instance* (paddle.Model(Net())) — confirm at the
# definition site of Net.
model = paddle.Model(Net)
# Configure optimizer, loss function and accuracy metric.
model.prepare(optimizer=paddle.optimizer.Adam(parameters=model.parameters()),
              loss=paddle.nn.CrossEntropyLoss(),
              metrics=paddle.metric.Accuracy())
# Launch the training loop.
model.fit(train_dataset,
          epochs=1,
          batch_size=64,
          verbose=1)

import paddle.vision as vision
import paddle
import paddle.vision.transforms as transforms
from paddle.vision.transforms import Normalize

# CIFAR-10 per-channel mean/std, scaled to the 0-255 pixel range.
normalize = transforms.Normalize(
    [0.4914 * 255, 0.4822 * 255, 0.4465 * 255],
    [0.2023 * 255, 0.1994 * 255, 0.2010 * 255])

# Training pipeline: padded random crop, horizontal flip, HWC->CHW
# transpose, then normalization.
trainTransforms = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.Transpose(),
    normalize
])
# Test pipeline: transpose + normalize only, no augmentation.
testTransforms = transforms.Compose([
    transforms.Transpose(),
    normalize
])

trainset = vision.datasets.Cifar10(mode='train', transform=trainTransforms)
trainloader = paddle.io.DataLoader(trainset, batch_size=128, num_workers=0,
                                   shuffle=True)
testset = vision.datasets.Cifar10(mode='test', transform=testTransforms)
# NOTE(review): shuffle=True on the test loader is unusual but preserved;
# it does not change aggregate evaluation metrics.
testloader = paddle.io.DataLoader(testset, batch_size=128, num_workers=0,
                                  shuffle=True)
Begin to download
Download finishedmodel = paddle.Model(gmlp_vision)# 调用飞桨框架的VisualDL模块,保存信息到目录中。
callback = paddle.callbacks.VisualDL(log_dir='gMLP_log_dir')
def create_optim(parameters):
    """Build an Adam optimizer with cosine-annealed LR and L2 decay.

    Args:
        parameters: iterable of model parameters to optimize.

    Returns:
        paddle.optimizer.Adam with a CosineAnnealingDecay schedule.
    """
    # BUG FIX: len(trainloader) already equals the number of batches per
    # epoch; the original divided it by 128 (the batch size) a second
    # time, making T_max — the cosine period — far too short.
    step_each_epoch = len(trainloader)
    lr = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.25,
                                                  T_max=step_each_epoch * 120)
    return paddle.optimizer.Adam(learning_rate=lr,
                                 parameters=parameters,
                                 weight_decay=paddle.regularizer.L2Decay(3e-4))
model.prepare(create_optim(model.parameters()), # 优化器
paddle.nn.CrossEntropyLoss(), # 损失函数
paddle.metric.Accuracy(topk=(1, 5))) # 评估指标model.fit(trainloader,
testloader,
epochs=120,
eval_freq=2,
shuffle=True,
save_dir='gMLP_case1_chk_points/',
save_freq=20,
batch_size=128,
callbacks=callback,
verbose=1)Epoch 120/120
step 391/391 [==============================] - loss: 1.3216 - acc_top1: 0.6175 - acc_top5: 0.9607 - 311ms/step
Eval begin...
step 79/79 [==============================] - loss: 0.6662 - acc_top1: 0.6014 - acc_top5: 0.9587 - 92ms/step
Eval samples: 10000model.train(
num_epochs=270,
train_dataset=train_dataset,
train_batch_size=8,
eval_dataset=eval_dataset,
learning_rate=0.000125,
lr_decay_epochs=[210, 240],
save_dir='output/yolov3_darknet53',
use_vdl=True)from paddlex.det import transforms
import paddlex as pdx

# Download and extract the insect detection dataset.
insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz'
pdx.utils.download_and_decompress(insect_dataset, path='./')

# Training transforms: mixup for the first 250 epochs, color/geometry
# augmentation, resize to 608 with random interpolation, flip, normalize.
# API: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=250),
    transforms.RandomDistort(),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(target_size=608, interp='RANDOM'),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize()
])
# Evaluation transforms: deterministic cubic resize + normalize.
eval_transforms = transforms.Compose([
    transforms.Resize(target_size=608, interp='CUBIC'),
    transforms.Normalize()
])

# VOC-format train/eval datasets.
# API: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection
train_dataset = pdx.datasets.VOCDetection(
    data_dir='insect_det',
    file_list='insect_det/train_list.txt',
    label_list='insect_det/labels.txt',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='insect_det',
    file_list='insect_det/val_list.txt',
    label_list='insect_det/labels.txt',
    transforms=eval_transforms)
2021-07-30 19:50:00,727 - INFO - generated new fontManager
2021-07-30 19:50:00 [INFO] Starting to read file list from dataset...
2021-07-30 19:50:01 [INFO] 169 samples in file insect_det/train_list.txt
creating index...
index created!
2021-07-30 19:50:01 [INFO] Starting to read file list from dataset...
2021-07-30 19:50:01 [INFO] 24 samples in file insect_det/val_list.txt
creating index...
index created!# 可使用VisualDL查看训练指标,参考https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
# The class count comes from the dataset's label list.
num_classes = len(train_dataset.labels)
# YOLOv3 with a DarkNet53 backbone.
# API: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
model = pdx.det.YOLOv3(num_classes=num_classes, backbone='DarkNet53')

# Train for 270 epochs; the learning rate decays at epochs 210 and 240.
# Parameter guide: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
model.train(
    num_epochs=270,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    learning_rate=0.000125,
    lr_decay_epochs=[210, 240],
    save_dir='output/yolov3_darknet53',
    use_vdl=True)
2021-07-15 20:52:55 [INFO] Load pretrain weights from output/yolov3_darknet53/pretrain/DarkNet53_ImageNet1k_pretrained.
2021-07-15 20:52:55 [INFO] There are 260 varaibles in output/yolov3_darknet53/pretrain/DarkNet53_ImageNet1k_pretrained are loaded.
2021-07-15 20:53:03 [INFO] [TRAIN] Epoch=1/270, Step=2/21, loss=18702.005859, lr=0.0, time_each_step=3.84s, eta=6:5:32
2021-07-15 20:53:04 [INFO] [TRAIN] Epoch=1/270, Step=4/21, loss=8156.868164, lr=0.0, time_each_step=2.12s, eta=3:21:43
2021-07-15 20:53:04 [INFO] [TRAIN] Epoch=1/270, Step=6/21, loss=10086.267578, lr=1e-06, time_each_step=1.53s, eta=2:25:42
输入一个值(x),乘以权重,结果就是网络的输出值。权重可以随着网络的训练进行更新,从而找到最佳的值,这样网络就能尝试匹配输出值与目标值。这里的权重其实就是一种参数。for epoch in range(MAX_EPOCH):
    # 训练代码 (training code here — `//` is not a valid Python comment)
print('{}[TRAIN]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))
if ():
break
model.train()
所以最好是在前期使用一个较大的学习速率让权重变化得更快。越往后,我们可以降低学习速率,这样可以作出更加精良的调整。⼀种自然的观点是使用提前终止的想法。就是保持学习速率为⼀个常量直到验证准确率开始变差,然后按照某个量下降学习速率。我们重复此过程若干次,直到学习速率是初始值的 1/1024(或者1/1000),然后终止训练。if i % 200 == 0:
timestring = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
print('{}[VALID]epoch {}, iter {}, output loss: {}'.format(timestring, epoch, i, loss.numpy()))import numpy as np
import cv2
import matplotlib.pyplot as plt

# OpenCV decodes images as BGR; convert to RGB so matplotlib displays
# the correct colors.
img = cv2.imread(PATH_TO_IMAGE)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.show()
!pip install --upgrade visualdlwriter = LogWriter("./log/lenet/run1")writer.add_hparams({'learning rate':0.0001, 'batch size':64, 'optimizer':'Adam'}, ['train/loss', 'train/acc'])add_scalar接口记录对应数值writer.add_scalar(tag="train/loss", step=step, value=cost)
writer.add_scalar(tag="train/acc", step=step, value=accuracy)img = np.reshape(batch[0][0], [28, 28, 1]) * 255
writer.add_image(tag="train/input", step=step, img=img)writer.add_histogram(tag='train/{}'.format(param), step=step, values=values)writer.add_pr_curve(tag='train/class_{}_pr_curve'.format(i),
labels=label_i,
predictions=prediction_i,
step=step,
num_thresholds=20)
# BUG FIX: the tag said 'pr_curve' — a copy-paste from the add_pr_curve
# call above — which would mislabel/collide with the PR-curve series in
# VisualDL. The ROC curve gets its own tag.
writer.add_roc_curve(tag='train/class_{}_roc_curve'.format(i),
                     labels=label_i,
                     predictions=prediction_i,
                     step=step,
                     num_thresholds=20)
# Export the inference model (static-graph fluid API).
fluid.io.save_inference_model(dirname='./model', feeded_var_names=['img'],
                              target_vars=[predictions], executor=exe)
!pip install interpretdlimport interpretdl as it
# Pretrained ResNet-50 as the model to be explained.
from paddle.vision.models import resnet50
paddle_model = resnet50(pretrained=True)
# SmoothGrad interpreter from InterpretDL (`it`), running on GPU.
sg = it.SmoothGradInterpreter(paddle_model, use_cuda=True)
gradients = sg.interpret("/home/aistudio/8.png", visual=True, save_path="/home/aistudio/work/interpret/SmoothGrad.png")ResNet50_result = [n for n, v in paddle_model.named_sublayers()]
print(ResNet50_result)['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer1.0', 'layer1.0.conv1', 'layer1.0.bn1', 'layer1.0.conv2', 'layer1.0.bn2', 'layer1.0.conv3', 'layer1.0.bn3', 'layer1.0.relu', 'layer1.0.downsample', 'layer1.0.downsample.0', 'layer1.0.downsample.1', 'layer1.1', 'layer1.1.conv1', 'layer1.1.bn1', 'layer1.1.conv2', 'layer1.1.bn2', 'layer1.1.conv3', 'layer1.1.bn3', 'layer1.1.relu', 'layer1.2', 'layer1.2.conv1', 'layer1.2.bn1', 'layer1.2.conv2', 'layer1.2.bn2', 'layer1.2.conv3', 'layer1.2.bn3', 'layer1.2.relu', 'layer2', 'layer2.0', 'layer2.0.conv1', 'layer2.0.bn1', 'layer2.0.conv2', 'layer2.0.bn2', 'layer2.0.conv3', 'layer2.0.bn3', 'layer2.0.relu', 'layer2.0.downsample', 'layer2.0.downsample.0', 'layer2.0.downsample.1', 'layer2.1', 'layer2.1.conv1', 'layer2.1.bn1', 'layer2.1.conv2', 'layer2.1.bn2', 'layer2.1.conv3', 'layer2.1.bn3', 'layer2.1.relu', 'layer2.2', 'layer2.2.conv1', 'layer2.2.bn1', 'layer2.2.conv2', 'layer2.2.bn2', 'layer2.2.conv3', 'layer2.2.bn3', 'layer2.2.relu', 'layer2.3', 'layer2.3.conv1', 'layer2.3.bn1', 'layer2.3.conv2', 'layer2.3.bn2', 'layer2.3.conv3', 'layer2.3.bn3', 'layer2.3.relu', 'layer3', 'layer3.0', 'layer3.0.conv1', 'layer3.0.bn1', 'layer3.0.conv2', 'layer3.0.bn2', 'layer3.0.conv3', 'layer3.0.bn3', 'layer3.0.relu', 'layer3.0.downsample', 'layer3.0.downsample.0', 'layer3.0.downsample.1', 'layer3.1', 'layer3.1.conv1', 'layer3.1.bn1', 'layer3.1.conv2', 'layer3.1.bn2', 'layer3.1.conv3', 'layer3.1.bn3', 'layer3.1.relu', 'layer3.2', 'layer3.2.conv1', 'layer3.2.bn1', 'layer3.2.conv2', 'layer3.2.bn2', 'layer3.2.conv3', 'layer3.2.bn3', 'layer3.2.relu', 'layer3.3', 'layer3.3.conv1', 'layer3.3.bn1', 'layer3.3.conv2', 'layer3.3.bn2', 'layer3.3.conv3', 'layer3.3.bn3', 'layer3.3.relu', 'layer3.4', 'layer3.4.conv1', 'layer3.4.bn1', 'layer3.4.conv2', 'layer3.4.bn2', 'layer3.4.conv3', 'layer3.4.bn3', 'layer3.4.relu', 'layer3.5', 'layer3.5.conv1', 'layer3.5.bn1', 'layer3.5.conv2', 'layer3.5.bn2', 'layer3.5.conv3', 'layer3.5.bn3', 
'layer3.5.relu', 'layer4', 'layer4.0', 'layer4.0.conv1', 'layer4.0.bn1', 'layer4.0.conv2', 'layer4.0.bn2', 'layer4.0.conv3', 'layer4.0.bn3', 'layer4.0.relu', 'layer4.0.downsample', 'layer4.0.downsample.0', 'layer4.0.downsample.1', 'layer4.1', 'layer4.1.conv1', 'layer4.1.bn1', 'layer4.1.conv2', 'layer4.1.bn2', 'layer4.1.conv3', 'layer4.1.bn3', 'layer4.1.relu', 'layer4.2', 'layer4.2.conv1', 'layer4.2.bn1', 'layer4.2.conv2', 'layer4.2.bn2', 'layer4.2.conv3', 'layer4.2.bn3', 'layer4.2.relu', 'avgpool', 'fc']gradcam = it.GradCAMInterpreter(paddle_model, use_cuda=True)
# Compute a Grad-CAM heatmap for the image with respect to the 'conv1'
# layer (a valid sublayer name printed above) and save the visualization.
heatmap = gradcam.interpret(
"/home/aistudio/8.png",
'conv1',
visual=True,
save_path="/home/aistudio/work/interpret/GradCAM.png")
我有一个模型:classItem项目有一个属性“商店”基于存储的值,我希望Item对象对特定方法具有不同的行为。Rails中是否有针对此的通用设计模式?如果方法中没有大的if-else语句,这是如何干净利落地完成的? 最佳答案 通常通过Single-TableInheritance. 关于ruby-on-rails-Rails-子类化模型的设计模式是什么?,我们在StackOverflow上找到一个类似的问题: https://stackoverflow.co
我需要从一个View访问多个模型。以前,我的links_controller仅用于提供以不同方式排序的链接资源。现在我想包括一个部分(我假设)显示按分数排序的顶级用户(@users=User.all.sort_by(&:score))我知道我可以将此代码插入每个链接操作并从View访问它,但这似乎不是“ruby方式”,我将需要在不久的将来访问更多模型。这可能会变得很脏,是否有针对这种情况的任何技术?注意事项:我认为我的应用程序正朝着单一格式和动态页面内容的方向发展,本质上是一个典型的网络应用程序。我知道before_filter但考虑到我希望应用程序进入的方向,这似乎很麻烦。最终从任何
exe应该在我打开页面时运行。异步进程需要运行。有什么方法可以在ruby中使用两个参数异步运行exe吗?我已经尝试过ruby命令-system()、exec()但它正在等待过程完成。我需要用参数启动exe,无需等待进程完成是否有任何rubygems会支持我的问题? 最佳答案 您可以使用Process.spawn和Process.wait2:pid=Process.spawn'your.exe','--option'#Later...pid,status=Process.wait2pid您的程序将作为解释器的子进程执行。除
我有一个包含模块的模型。我想在模块中覆盖模型的访问器方法。例如:classBlah这显然行不通。有什么想法可以实现吗? 最佳答案 您的代码看起来是正确的。我们正在毫无困难地使用这个确切的模式。如果我没记错的话,Rails使用#method_missing作为属性setter,因此您的模块将优先,阻止ActiveRecord的setter。如果您正在使用ActiveSupport::Concern(参见thisblogpost),那么您的实例方法需要进入一个特殊的模块:classBlah
我有一些Ruby代码,如下所示:Something.createdo|x|x.foo=barend我想编写一个测试,它使用double代替block参数x,这样我就可以调用:x_double.should_receive(:foo).with("whatever").这可能吗? 最佳答案 specify'something'dox=doublex.should_receive(:foo=).with("whatever")Something.should_receive(:create).and_yield(x)#callthere
我有一个表单,其中有很多字段取自数组(而不是模型或对象)。我如何验证这些字段的存在?solve_problem_pathdo|f|%>... 最佳答案 创建一个简单的类来包装请求参数并使用ActiveModel::Validations。#definedsomewhere,atthesimplest:require'ostruct'classSolvetrue#youcouldevencheckthesolutionwithavalidatorvalidatedoerrors.add(:base,"WRONG!!!")unlesss
我想向我的Controller传递一个参数,它是一个简单的复选框,但我不知道如何在模型的form_for中引入它,这是我的观点:{:id=>'go_finance'}do|f|%>Transferirde:para:Entrada:"input",:placeholder=>"Quantofoiganho?"%>Saída:"output",:placeholder=>"Quantofoigasto?"%>Nota:我想做一个额外的复选框,但我该怎么做,模型中没有一个对象,而是一个要检查的对象,以便在Controller中创建一个ifelse,如果没有检查,请帮助我,非常感谢,谢谢
我正在为一个项目制作一个简单的shell,我希望像在Bash中一样解析参数字符串。foobar"helloworld"fooz应该变成:["foo","bar","helloworld","fooz"]等等。到目前为止,我一直在使用CSV::parse_line,将列分隔符设置为""和.compact输出。问题是我现在必须选择是要支持单引号还是双引号。CSV不支持超过一个分隔符。Python有一个名为shlex的模块:>>>shlex.split("Test'helloworld'foo")['Test','helloworld','foo']>>>shlex.split('Test"
我不确定传递给方法的对象的类型是否正确。我可能会将一个字符串传递给一个只能处理整数的函数。某种运行时保证怎么样?我看不到比以下更好的选择:defsomeFixNumMangler(input)raise"wrongtype:integerrequired"unlessinput.class==FixNumother_stuffend有更好的选择吗? 最佳答案 使用Kernel#Integer在使用之前转换输入的方法。当无法以任何合理的方式将输入转换为整数时,它将引发ArgumentError。defmy_method(number)
我有一些非常大的模型,我必须将它们迁移到最新版本的Rails。这些模型有相当多的验证(User有大约50个验证)。是否可以将所有这些验证移动到另一个文件中?说app/models/validations/user_validations.rb。如果可以,有人可以提供示例吗? 最佳答案 您可以为此使用关注点:#app/models/validations/user_validations.rbrequire'active_support/concern'moduleUserValidationsextendActiveSupport: