# Ridge-regression toolkit, part 1: data loading/splitting, a list-based
# matrix transpose, and the closed-form ridge coefficient solver.
# NOTE(review): depends on `DATA.txt` (presumably defines the global data
# set MR) and `EM19Proj1.txt` (presumably defines ExtractFields) — confirm
# both files are present in Maple's current directory before use.
#################################Help######################################
Help := proc():print(``):end:
#################################Algorithm#################################
read `DATA.txt`:
read `EM19Proj1.txt`:
with(linalg):
with(LinearAlgebra):

# DividM(t,L): split the list L at fraction t.
# Returns [first trunc(nops(L)*t) items of L, the remaining items of L].
# With t=0 the first part is empty; with t=1 the second part is empty.
DividM:=proc(t,L) local i:
[[seq(L[i],i=1..trunc(nops(L)*t))],[seq(L[i],i=1+trunc(nops(L)*t)..nops(L) ) ]]:
end:

# InitialD(Feature): Feature is a list [Feature1,Feature2,..,Featuren,ObjFeature].
# Extracts those fields from the global data set MR (via ExtractFields) and
# returns [X,Y], where each row of X is [1, feature values] — the leading 1
# is the intercept column — and each row of Y is the single objective value
# (the last field of each record).
InitialD:=proc(Feature) local i,j,X,Y,L:
L:=ExtractFields(MR,Feature):
X:=[seq([1,seq(L[i][j],j=1..nops(L[1])-1)],i=1..nops(L))]:
Y:=[seq([L[i][-1]],i=1..nops(L))]:
[X,Y]:
end:

# MatrixT(A): transpose of A represented as a list of lists.
# If A is an m*n list of lists (n<>1), returns the n*m transpose.
# Otherwise (e.g. A is a flat list such as a beta coefficient vector,
# where nops(A[1])=1), returns A as a column, i.e. a list of 1-element
# rows. Sub() relies on this second branch to columnize beta.
# NOTE(review): an actual m*1 list-of-lists also takes the else branch and
# is wrapped a second time — no caller in this file does that, but verify
# before reusing MatrixT elsewhere.
MatrixT:=proc(A) local i,j:
if nops(A[1])<>1 then
[seq([seq(A[j][i],j=1..nops(A))],i=1..nops(A[1]))]:
else
[seq([A[j]],j=1..nops(A))]:
fi:
end:

# DecideB(X,Y,alpha): closed-form ridge regression. Given the design matrix
# X (list of rows), the target column Y and the ridge index alpha, returns
# the coefficient list
#   beta = (X^T X + alpha^2 I)^(-1) X^T Y
# as a one-element list wrapping the flat coefficient list.
# The identity block is nops(X[1]) x nops(X[1]), i.e. one coefficient per
# column of X (intercept included).
DecideB:=proc(X,Y,alpha) local i,TX:
TX:=Matrix(MatrixT(X)):
[convert(inverse(TX.Matrix(X)+ScalarMatrix(alpha^2,nops(X[1]),nops(X[1]))).TX.Matrix(Y),list)]:
end:

# DecideA(X,Y,K): defined below — searches alpha over (0,K) for the value
# minimizing the relative prediction error.
# Ridge-regression toolkit, part 2: alpha search, error metric, prediction,
# the Main driver and the comparison plot.

# DecideA(X,Y,K): scan alpha from 0 up to K in steps of 0.001 (NOTE: the
# step is 0.001, not 0.1 as an earlier comment claimed), fitting ridge
# coefficients via DecideB at each alpha and scoring them with Error on
# the same (X,Y). Returns [best beta, evalf(best error), best alpha].
DecideA:=proc(X,Y,K) local alpha,beta,beta_optm,error_temp,error_optm,alpha_optm:
  # Baseline: ordinary least squares (alpha = 0).
  alpha:=0:
  beta := DecideB(X,Y,alpha):
  error_optm:= Error(X,Y,beta):
  beta_optm := beta:
  alpha_optm:=0:
  while alpha < K do
    alpha := alpha + 0.001:
    beta := DecideB(X,Y,alpha):
    error_temp:=Error(X,Y,beta):
    # Keep the strictly better candidate; ties retain the smaller alpha.
    if error_temp < error_optm then
      error_optm := error_temp:
      beta_optm := beta:
      alpha_optm :=alpha:
    fi:
  od:
  [beta_optm,evalf(error_optm),alpha_optm]:
end:

# Error(X,Y,beta): relative prediction error. Computes the residuals
# Y - Y_hat (via Sub), divides each residual by the corresponding true
# value (so Y must contain no zeros), and returns the 2-norm of that
# relative-residual list scaled by 1/nops(L)^2, as a float.
Error:=proc(X,Y,beta)local L,LN,LY,i:
  L := convert(Matrix(Y),list)-Sub(X,beta):
  LY := convert(Matrix(Y),list):
  LN := [seq(L[i]/LY[i],i=1..nops(L))]:
  evalf(norm(LN,2)/nops(L)^2):
end:

# Sub(X,beta): substitute beta into the linear model, i.e. compute the
# prediction Y_hat = X.beta, returned as a flat list.
Sub:=proc(X,beta):
  convert(Matrix(X).Matrix(MatrixT(beta)),list):
end:

#################################Main func#################################
# Main(rate,K,Feature,way): full pipeline. Builds (X,Y) from Feature,
# splits both into train/test at fraction `rate`, picks alpha/beta on the
# training split via DecideA, and evaluates on the test split.
# If way=1, draws predicted vs. actual test values; otherwise returns
# [beta, test error].
Main:=proc(rate,K,Feature,way) local D,XS,YS,X,X_test,X_train,Y,Y_test,Y_train,beta,er:
  # Compute the data set and the splits once each (previously InitialD was
  # invoked twice and DividM four times for identical results).
  D:=InitialD(Feature):
  X:=D[1]:
  Y:=D[2]:
  XS:=DividM(rate,X):
  X_train:=XS[1]:
  X_test:=XS[2]:
  YS:=DividM(rate,Y):
  Y_train:=YS[1]:
  Y_test:=YS[2]:
  beta:=DecideA(X_train,Y_train,K)[1]:
  er:=Error(X_test,Y_test,beta):
  if way=1 then
    DrawP(beta,X_test,Y_test):
  else
    [beta,er]:
  fi:
end:

#################################Draw func#################################
with(plots):

# DrawP(beta,X,Y): scatter plot of predicted values (red boxes) against
# actual values (blue circles), both indexed 1..nops(Y) on the x-axis.
DrawP:=proc(beta,X,Y) local p1,p2,Y_hat,i:
  Y_hat:=Sub(X,beta):
  p1 := plot([seq(i,i=1..nops(Y))], convert(Matrix(Y_hat),list), style = point, symbol=box, symbolsize = 20, color = red):
  p2 := plot([seq(i,i=1..nops(Y))], convert(Matrix(Y),list), style = point, symbol= circle, symbolsize = 20, color = blue):
  display(p1,p2):
end: