#C25.txt, April 25, 2019, Math640 (Sp 2019), Dr. Z. Neural networks, back-propagation
Help:=proc(): print(` sig(u), NN2(x,W,Wp), Loss1(x,t,W,Wp), Loss(S,W,Wp), QD(y,t) `): end:

#sig(u): the sigmoid (logistic) function
sig:=proc(u): 1/(1+exp(-u)): end:

#NN2(x,W,Wp): a Maple implementation of Fig. 6 in https://arxiv.org/pdf/1411.2738.pdf
#(the amazing article of Xin Rong)
#inputs a numerical column vector x (data of descriptive features) of length K, say,
#a matrix W of size N by K, say (given as a list of N rows), and a matrix Wp of size M by N, say,
#and outputs the numerical vector y of length M computed by this "deep" neural net
NN2:=proc(x,W,Wp) local K,N,M,y,h,i,j:
K:=nops(x): N:=nops(W): M:=nops(Wp):
if not K=nops(W[1]) then RETURN(FAIL): fi:
h:=[seq(add(W[i][j]*x[j],j=1..K),i=1..N)]:
h:=[seq(sig(h[i]),i=1..N)]:
y:=[seq(add(Wp[i][j]*h[j],j=1..N),i=1..M)]:
y:=[seq(sig(y[i]),i=1..M)]:
y:
end:

#Loss1(x,t,W,Wp): inputs vectors x and t (t=actual target, the "gold standard")
#and outputs the loss of the neural net (W,Wp) on that single data point
Loss1:=proc(x,t,W,Wp) local i,y:
y:=NN2(x,W,Wp):
0.5*add((y[i]-t[i])^2,i=1..nops(y)):
end:

#Loss(S,W,Wp): the average loss over the data set S, a list of pairs [x,t]
Loss:=proc(S,W,Wp) local s:
add(Loss1(op(s),W,Wp),s in S)/nops(S):
end:

#QD(y,t): the vector of quantities e'_j of Eq. (78) of the above paper
QD:=proc(y,t) local j:
[seq((y[j]-t[j])*y[j]*(1-y[j]),j=1..nops(y))]:
end:

#BGP1(x,t,W,Wp,eta): implementing Eqs. (80) and (85), to be continued as homework
BGP1:=proc(x,t,W,Wp,eta) local K,N,M,y,h,i,j,DWp,Ejp:
#JUST STARTED
K:=nops(x): N:=nops(W): M:=nops(Wp):
if not K=nops(W[1]) then RETURN(FAIL): fi:
h:=[seq(add(W[i][j]*x[j],j=1..K),i=1..N)]:
h:=[seq(sig(h[i]),i=1..N)]:
y:=[seq(add(Wp[i][j]*h[j],j=1..N),i=1..M)]:
y:=[seq(sig(y[i]),i=1..M)]:
y:
end:
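
#BGP1try(x,t,W,Wp,eta): only a possible sketch (NOT the official homework solution) of how BGP1
#could be finished. After the forward pass it uses QD (Eq. (78)) and then, under one reading of
#Eqs. (80) and (85) of the Xin Rong paper, performs a single gradient-descent step with learning
#rate eta, first on Wp and then on W, returning the pair [new W, new Wp]. The name BGP1try and
#the locals Ei, Wn, Wpn are made up for illustration; the conventions are those of NN2 above
#(W is N by K, Wp is M by N).
BGP1try:=proc(x,t,W,Wp,eta) local K,N,M,y,h,i,j,k,Ejp,Ei,Wn,Wpn:
K:=nops(x): N:=nops(W): M:=nops(Wp):
if not K=nops(W[1]) then RETURN(FAIL): fi:
#forward pass, exactly as in NN2
h:=[seq(sig(add(W[i][j]*x[j],j=1..K)),i=1..N)]:
y:=[seq(sig(add(Wp[i][j]*h[j],j=1..N)),i=1..M)]:
#Ejp[j]=e'_j of Eq. (78)
Ejp:=QD(y,t):
#Eq. (80): new Wp[j][i]=Wp[j][i]-eta*e'_j*h[i]
Wpn:=[seq([seq(Wp[j][i]-eta*Ejp[j]*h[i],i=1..N)],j=1..M)]:
#back-propagated error at the hidden layer: e_i=(sum_j e'_j*Wp[j][i])*h[i]*(1-h[i])
Ei:=[seq(add(Ejp[j]*Wp[j][i],j=1..M)*h[i]*(1-h[i]),i=1..N)]:
#Eq. (85): new W[i][k]=W[i][k]-eta*e_i*x[k]
Wn:=[seq([seq(W[i][k]-eta*Ei[i]*x[k],k=1..K)],i=1..N)]:
[Wn,Wpn]:
end: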
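
#Demo1(): a small made-up example (the name Demo1 and all the numbers are invented, purely for
#illustration) showing the data format assumed above: x and t are plain lists, W is a list of N
#rows each of length K, Wp is a list of M rows each of length N, and a data set S is a list of
#pairs [x,t]. It returns [NN2(x,W,Wp), Loss1(x,t,W,Wp), Loss(S,W,Wp)] for one toy data point.
Demo1:=proc() local x,t,W,Wp,S:
x:=[0.1,0.9]: t:=[1.,0.]:
W:=[[0.2,-0.3],[0.4,0.1],[-0.5,0.2]]:      #N=3 hidden units, K=2 inputs
Wp:=[[0.1,0.3,-0.2],[0.2,-0.4,0.1]]:       #M=2 outputs, N=3 hidden units
S:=[[x,t]]:
[NN2(x,W,Wp), Loss1(x,t,W,Wp), Loss(S,W,Wp)]:
end: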