How to make this shape with UIBezierPath

Problem description

I am trying to use UIBezierPath to turn a UIView into this shape. At the moment I can only produce the bottom-left corner, and I am looking for help with adding the other corners.

(screenshot of the desired shape)

Code for the bottom-left corner only:

let mask = CAShapeLayer()
mask.frame = self.innerLayout.layer.bounds

let path = UIBezierPath()
let radius: CGFloat = 50
let rect = mask.bounds

// Trace the three untouched corners with straight lines...
path.move(to: rect.origin)
path.addLine(to: CGPoint(x: rect.maxX, y: rect.minY))
path.addLine(to: CGPoint(x: rect.maxX, y: rect.maxY))
path.addLine(to: CGPoint(x: rect.minX + radius, y: rect.maxY))

// ...then cut a concave quarter circle out of the bottom-left corner.
path.addArc(withCenter: CGPoint(x: rect.minX, y: rect.maxY),
            radius: radius,
            startAngle: 0,
            endAngle: CGFloat(M_PI_2 * 3),
            clockwise: false)

mask.path = path.cgPath
self.innerLayout.layer.mask = mask

I have made a few attempts at adding the other corners, but the shape my UIBezierPath produced came out looking odd. I simply added the arc again by copying and pasting it (and changing the origin), and I believe this piece of code will have to be used four times to add all four corners of the UIView.
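For reference, the reason a plain copy-and-paste of that arc looks odd is that each corner needs its own startAngle/endAngle pair, not just a different center: in UIKit's flipped coordinate system an angle of 0 points right and .pi / 2 points down. A minimal illustration for the top-left corner, reusing the path, rect and radius locals from the snippet above and assuming the goal is a concave notch at every corner:

// UIKit angles: 0 = right, .pi / 2 = down, .pi = left, 3 * .pi / 2 = up.
// A concave notch runs against the outline's direction, so it keeps
// clockwise: false, but the angles change from corner to corner.
// Top-left corner: sweep from the left edge (below the corner) to the top edge.
path.addArc(withCenter: CGPoint(x: rect.minX, y: rect.minY),
            radius: radius,
            startAngle: CGFloat.pi / 2,
            endAngle: 0,
            clockwise: false)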

Solution

Here is a complete example that you can run in a Playground. Geometry like this is always tricky: it is very easy to get a coordinate wrong or to make an arc sweep in the wrong direction.

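A minimal sketch of such a Playground example, assuming the target shape is a rectangle with a concave quarter-circle notch cut out of each corner; the notchedPath helper, the 300×200 view size, and the 50-point radius are illustrative choices rather than values from the original post:

import UIKit
import PlaygroundSupport

// Builds a rectangular path with a concave quarter-circle notch at each corner.
func notchedPath(in rect: CGRect, cornerRadius radius: CGFloat) -> UIBezierPath {
    let path = UIBezierPath()

    // Start on the left edge, just below the top-left notch.
    path.move(to: CGPoint(x: rect.minX, y: rect.minY + radius))

    // Top-left notch: sweep from the left edge to the top edge, bending into the rectangle.
    path.addArc(withCenter: CGPoint(x: rect.minX, y: rect.minY), radius: radius,
                startAngle: CGFloat.pi / 2, endAngle: 0, clockwise: false)

    // Top edge.
    path.addLine(to: CGPoint(x: rect.maxX - radius, y: rect.minY))

    // Top-right notch.
    path.addArc(withCenter: CGPoint(x: rect.maxX, y: rect.minY), radius: radius,
                startAngle: CGFloat.pi, endAngle: CGFloat.pi / 2, clockwise: false)

    // Right edge.
    path.addLine(to: CGPoint(x: rect.maxX, y: rect.maxY - radius))

    // Bottom-right notch.
    path.addArc(withCenter: CGPoint(x: rect.maxX, y: rect.maxY), radius: radius,
                startAngle: 3 * CGFloat.pi / 2, endAngle: CGFloat.pi, clockwise: false)

    // Bottom edge.
    path.addLine(to: CGPoint(x: rect.minX + radius, y: rect.maxY))

    // Bottom-left notch: the same arc as in the question.
    path.addArc(withCenter: CGPoint(x: rect.minX, y: rect.maxY), radius: radius,
                startAngle: 0, endAngle: 3 * CGFloat.pi / 2, clockwise: false)

    // Close the path along the left edge, back to the starting point.
    path.close()
    return path
}

// A stand-in for the innerLayout view mentioned in the question.
let innerLayout = UIView(frame: CGRect(x: 0, y: 0, width: 300, height: 200))
innerLayout.backgroundColor = .systemBlue

let mask = CAShapeLayer()
mask.frame = innerLayout.bounds
mask.path = notchedPath(in: innerLayout.bounds, cornerRadius: 50).cgPath
innerLayout.layer.mask = mask

// Show the masked view in the Playground's live view area.
PlaygroundPage.current.liveView = innerLayout

Every notch is drawn with clockwise: false because a concave corner is traced against the direction of the surrounding outline; getting that flag or a start/end angle wrong is exactly the kind of mistake that makes the shape come out looking strange.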