% fnn2_3_3_9_1.m by Chun-Tang Chao, 2008.7 (No copyright. For free distribution.)
% Trained System: y = sin(pi*x2)/(2+sin(pi*x1)),  -1 <= x1 <= 1 and 0 <= x2 <= 1
% FNN Structure: Two inputs x1 and x2.
%                Three term nodes for x1, and three term nodes for x2.
%                Nine rules. One output.
clear all; clc;

tnode_n1=3; tnode_n2=3;        % three term nodes for x1, three term nodes for x2
rule_n=tnode_n1*tnode_n2;      % nine rules

% Training Patterns
pairs_n=247;                   % number of training data pairs
x1=2*rand(pairs_n,1)-1;        % -1 <= x1 <= 1
x2=rand(pairs_n,1);            % 0 <= x2 <= 1
d=sin(pi*x2)./(2+sin(pi*x1));  % desired O/P of the trained system

% Fuzzy-Neural Structure Definition & Initialization
mx1_o_connect=[1 2 3; 4 5 6; 7 8 9];   % term nodes o/p connection
mx2_o_connect=[1 4 7; 2 5 8; 3 6 9];
r_f_connect=[1 1; 1 2; 1 3; 2 1; 2 2; 2 3; 3 1; 3 2; 3 3];   % rule nodes i/p connection
mx1_old=[0.6 0 -0.6]; mx2_old=[0.75 0.5 0.25];               % parameter initialization
stdx1_old=[0.5 0.5 0.5]; stdx2_old=[0.25 0.25 0.25];
w_old=[0.2396 0.3389 0.2396 0.3536 0.5000 0.3536 0.6741 0.9533 0.6741];

% Learning Rate Definition
eta_m=0.01; eta_std=0.01; eta_w=0.01;
alpha_m=0.9; alpha_std=0.9; alpha_w=0.9;

% delta_weight Initialization (momentum terms start at zero)
delta_w=zeros(1,rule_n);
delta_mx1=zeros(1,tnode_n1); delta_mx2=zeros(1,tnode_n2);
delta_stdx1=zeros(1,tnode_n1); delta_stdx2=zeros(1,tnode_n2);

% Preallocation of forward-pass buffers
y=zeros(1,pairs_n); Ro=zeros(1,rule_n);

for epoch=1:300                % epoch number
    for i=1:pairs_n
        % Forward Operation
        % Input (replicated for the three term nodes of each variable)
        xa=[x1(i) x1(i) x1(i)];
        xb=[x2(i) x2(i) x2(i)];
        % Term Nodes Layer O/P (Gaussian membership degrees)
        ya2=exp(-(((xa-mx1_old)./stdx1_old).^2));   % note the element-wise (dot) operators
        yb2=exp(-(((xb-mx2_old)./stdx2_old).^2));
        % Rule Nodes Operation (firing strength of each rule)
        for j=1:rule_n
            Ro(j)=ya2(r_f_connect(j,1))*yb2(r_f_connect(j,2));
        end
        Ro_sum=sum(Ro);
        % O/P Layer (normalized weighted sum)
        y(i)=(w_old*Ro')/Ro_sum;
        % ------------------------------------------------------------------
        % Back-Propagation Adjustment (O/P weights w)
        for j=1:rule_n
            pE_d_pw= -(d(i)-y(i))*Ro(j)/Ro_sum;
            w_new(j)=w_old(j)-eta_w*pE_d_pw+alpha_w*delta_w(j);
        end
        % Back-Propagation Adjustment (mean values mx1 of term nodes of x1)
        for j=1:tnode_n1
            wy=0;
            for k=1:tnode_n2
                p=mx1_o_connect(j,k);
                wy=wy+(w_old(p)-y(i))*Ro(p);
            end
            pE_d_pmx1= -((d(i)-y(i))/Ro_sum)*wy*2*(xa(j)-mx1_old(j))/((stdx1_old(j))^2);
            mx1_new(j)=mx1_old(j)-eta_m*pE_d_pmx1+alpha_m*delta_mx1(j);
        end
        % Back-Propagation Adjustment (mean values mx2 of term nodes of x2)
        for j=1:tnode_n2
            wy=0;
            for k=1:tnode_n1
                p=mx2_o_connect(j,k);
                wy=wy+(w_old(p)-y(i))*Ro(p);
            end
            pE_d_pmx2= -((d(i)-y(i))/Ro_sum)*wy*2*(xb(j)-mx2_old(j))/((stdx2_old(j))^2);
            mx2_new(j)=mx2_old(j)-eta_m*pE_d_pmx2+alpha_m*delta_mx2(j);
        end
        % Back-Propagation Adjustment (std values stdx1 of term nodes of x1)
        for j=1:tnode_n1
            wy=0;
            for k=1:tnode_n2
                p=mx1_o_connect(j,k);
                wy=wy+(w_old(p)-y(i))*Ro(p);
            end
            pE_d_pstdx1= -((d(i)-y(i))/Ro_sum)*wy*2*((xa(j)-mx1_old(j))^2)/((stdx1_old(j))^2);
            stdx1_new(j)=stdx1_old(j)-eta_std*pE_d_pstdx1+alpha_std*delta_stdx1(j);
        end
        % Back-Propagation Adjustment (std values stdx2 of term nodes of x2)
        for j=1:tnode_n2
            wy=0;
            for k=1:tnode_n1
                p=mx2_o_connect(j,k);
                wy=wy+(w_old(p)-y(i))*Ro(p);
            end
            pE_d_pstdx2= -((d(i)-y(i))/Ro_sum)*wy*2*((xb(j)-mx2_old(j))^2)/((stdx2_old(j))^2);
            stdx2_new(j)=stdx2_old(j)-eta_std*pE_d_pstdx2+alpha_std*delta_stdx2(j);
        end
        % Update parameters and momentum (delta) terms
        delta_w=w_new-w_old;             w_old=w_new;
        delta_mx1=mx1_new-mx1_old;       mx1_old=mx1_new;
        delta_mx2=mx2_new-mx2_old;       mx2_old=mx2_new;
        delta_stdx1=stdx1_new-stdx1_old; stdx1_old=stdx1_new;
        delta_stdx2=stdx2_new-stdx2_old; stdx2_old=stdx2_new;
    end
    err=d-y';
    mse_err(epoch)=mean(err.^2);         % MSE over all training pairs
    fprintf('epoch=%d, MSE=%f. \n',epoch,mse_err(epoch));
end

% Learning Curve Plotting
plot(1:300, mse_err);
title('Learning Curve');
xlabel('Number of Learning Epochs');
ylabel('MSE');

% Learning Results O/P
fprintf('\n\n\n\nLearning Results O/P: \n\n');
for k=1:tnode_n1
    fprintf('mx1(%d)=%f, stdx1(%d)=%f \n',k,mx1_new(k),k,stdx1_new(k));
end
for k=1:tnode_n2
    fprintf('mx2(%d)=%f, stdx2(%d)=%f \n',k,mx2_new(k),k,stdx2_new(k));
end
for k=1:rule_n
    fprintf('w(%d)=%f \n',k,w_new(k));
end

% In the following paper, the program was written in the C language:
% C.T. Chao and C.C. Teng, "Implementation of a fuzzy inference system using a
% normalized fuzzy neural network," Fuzzy Sets and Systems, vol. 75, no. 1,
% pp. 17-31, October 1995.
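
% ------------------------------------------------------------------
% Optional check: a minimal sketch that evaluates the trained FNN on a test
% grid and compares it with the target function y = sin(pi*x2)/(2+sin(pi*x1)).
% It reuses the learned parameters (mx1_new, stdx1_new, mx2_new, stdx2_new,
% w_new) and the connection table r_f_connect from above; the grid variables
% x1t, x2t, y_fnn, y_true are introduced here for illustration only.
[x1t,x2t]=meshgrid(-1:0.1:1, 0:0.05:1);     % test grid over the input domain
y_fnn=zeros(size(x1t));
for r=1:size(x1t,1)
    for c=1:size(x1t,2)
        % Gaussian membership degrees at the test point
        ma=exp(-(((x1t(r,c)-mx1_new)./stdx1_new).^2));
        mb=exp(-(((x2t(r,c)-mx2_new)./stdx2_new).^2));
        % Rule firing strengths and normalized weighted output
        Rt=zeros(1,rule_n);
        for j=1:rule_n
            Rt(j)=ma(r_f_connect(j,1))*mb(r_f_connect(j,2));
        end
        y_fnn(r,c)=(w_new*Rt')/sum(Rt);
    end
end
y_true=sin(pi*x2t)./(2+sin(pi*x1t));        % desired surface
fprintf('Test-grid MSE = %f \n', mean((y_fnn(:)-y_true(:)).^2));
figure; mesh(x1t,x2t,y_fnn);
title('FNN Output Surface (test grid)');
xlabel('x1'); ylabel('x2'); zlabel('y');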