Demo entry 6647049

PFP

Submitted by anonymous on Oct 18, 2017 at 20:58
Language: Python 3. Code size: 2.6 kB.

p=10
epsilon=0.01  # tolerance
x=(0,1)       # starting point
x1=x[0]
x2=x[1]
theta=0.01    # fixed step size
def PFP(f, epsilon, theta, x1, x2):
    # Steepest descent with a fixed step theta.
    # Note: the parameter f is unused; the gradient is hard-coded in grad().
    [g1, g2] = grad(x1, x2)
    k = 1
    while abs(g1) > epsilon or abs(g2) > epsilon:  # and k < 10000000:
        [g1, g2] = grad(x1, x2)
        x1 = x1 - theta*g1
        x2 = x2 - theta*g2
        #print(x1, x2)
        #print(g1, g2)
        k += 1

    return x1, x2, k


def f(x1, x2):
    # Rosenbrock-type test function with p = 10.
    return (1 - x1)**2 + p*(x2 - x1*x1)**2




def Newton(theta, x1, x2):
    # Steepest descent where, at each iteration, the step theta is chosen
    # by Newton's method on the 1-D line-search function g(theta).
    k = 1
    [g1, g2] = grad(x1, x2)
    while (abs(g1) > epsilon or abs(g2) > epsilon) and k < 200:
        [g1, g2] = grad(x1, x2)
        x1 = x1 - theta*g1
        x2 = x2 - theta*g2
        # Inner Newton iteration on theta: theta <- theta - g'(theta)/g''(theta).
        k1 = 1
        theta = 0.1
        while abs(gradg(theta, x1, x2)/gradgradg(theta, x1, x2)) > epsilon and k1 < 100:
            theta = theta - gradg(theta, x1, x2)/gradgradg(theta, x1, x2)
            k1 += 1
            #print(gradg(theta, x1, x2)/gradgradg(theta, x1, x2), theta, k1)
        #print('\n')
        k += 1
    return x1, x2, k

def grad(x1, x2):
    # Analytic gradient of f.
    g1 = -2*(1 - x1) - 4*p*(x2 - x1**2)*x1
    g2 = 2*p*(x2 - x1**2)
    return [g1, g2]
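
# A minimal sanity check of the analytic gradient against central finite
# differences (an added sketch, not in the original paste; the step h and
# the name check_grad are arbitrary choices):
def check_grad(x1, x2, h=1e-6):
    fd1 = (f(x1 + h, x2) - f(x1 - h, x2)) / (2*h)
    fd2 = (f(x1, x2 + h) - f(x1, x2 - h)) / (2*h)
    return [fd1, fd2]  # should agree with grad(x1, x2) up to ~h
#print(check_grad(0.5, 0.5), grad(0.5, 0.5))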


#def g(theta, x1, x2):
#    return f(x1 - theta*grad(x1, x2)[0], x2 - theta*grad(x1, x2)[1])

def g(theta, x1, x2):
    # g(theta) = f(x - theta*grad f(x)), the objective along the descent direction.
    return (1 - x1 + theta*grad(x1, x2)[0])**2 + p*(x2 - theta*grad(x1, x2)[1] - (x1 - theta*grad(x1, x2)[0])**2)**2

def gradgradg(theta, x1, x2):
    # Second derivative of g with respect to theta.
    [g1, g2] = grad(x1, x2)
    return 2*g1**2 + 2*p*((-g2 + 2*(x1 - theta*g1)*g1)**2 - 2*g1*g1*(x2 - theta*g2 - (x1 - theta*g1)**2))


def gradg(theta, x1, x2):
    # First derivative of g with respect to theta.
    [g1, g2] = grad(x1, x2)
    return 2*(1 - x1 + theta*g1)*g1 + 2*p*(-g2 + 2*(x1 - theta*g1)*g1)*(x2 - theta*g2 - (x1 - theta*g1)**2)
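
# An equivalent, shorter form of gradg via the chain rule (an added sketch,
# not part of the original; the name gradg_chain is hypothetical):
# g'(theta) = -grad f(x) . grad f(x - theta*grad f(x)).
def gradg_chain(theta, x1, x2):
    [g1, g2] = grad(x1, x2)
    [G1, G2] = grad(x1 - theta*g1, x2 - theta*g2)
    return -(g1*G1 + g2*G2)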

x1,x2,k=PFP( f,epsilon,theta,x1,x2 )
print ('x1=',x1,'  x2=',x2,'   iteration:',k)

#theta=0.001  x1= 0.9876518718210758   x2= 0.9749565618575149    iteration: 7989
#theta=0.0001 x1= 0.9876441750282122   x2= 0.9749410448189318    iteration: 79828
#theta=0.01   x1= 0.9877112339714871   x2= 0.9750762417047367    iteration: 804
#theta=0.02   x1= 0.9877573522160836   x2= 0.9751692246110555    iteration: 404
#So the largest step we can take is theta=0.02 (the iteration no longer stops for theta=0.03), and it needs 404 iterations.
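
# A small sweep to reproduce the step-size table above (an added sketch;
# each run restarts from the original starting point (0, 1)):
#for th in (0.0001, 0.001, 0.01, 0.02):
#    print('theta=', th, PFP(f, epsilon, th, 0, 1))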


x1,x2,k = Newton(theta,x1,x2)
print (x1,x2,k)

#Newton result:
#0.986755824880326 0.9723115765436894 76
#So the Newton (line-search) method is much faster than the fixed step: it needs only 76 iterations.
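
# Optional cross-check with scipy.optimize.minimize (an assumption: scipy is
# installed; this is not part of the original paste). The exact minimizer of
# f is (1, 1):
#from scipy.optimize import minimize
#res = minimize(lambda v: f(v[0], v[1]), [0, 1])
#print(res.x, res.fun)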
