Demo entry 6684468

Training

   

Submitted by Bruce on Dec 15, 2017 at 11:04
Language: Python 3. Code size: 1.0 kB.

# A training iteration
Loop: 
	#training data
	Sh = {}, Th = {}, St = {}, Tt = {}
	for (h, r, t) ∈ S do:
		e <-random(h, t)
		#tail is missing
		if e == h then:
			Sh.add([e, r])
			#candidate sampling for tail = all positive tail entity and some sampled negative tail entity
			Th.add({t^ | (h, r, t^) ∈ S} + sample(E, py))
		else:
			#head is missing
			St.add([e, r])
			#candidate sampling for head = all positive head entity and some sampled negative head entity
			Tt.add({h^ | (h^, r, t) ∈ S} + sample(E, py))
		end
	end
	#mini batch
	for each batch(Sbh, Tbh, Sbt, Tbt) belongs to (Sh, Th, St, Tt):
		# training
		for (sh, th, st, tt):
			Oh <- softmax(We * tanh(dropout(g(Der * (r .* f(Deh * eh + beh)) + beh))))
			Ot <- softmax(We * tanh(dropout(g(Deh * (r .* f(Der * er + ber)) + ber))))
			l <- l - sum({1(h, st[0], st[1])} .* log(Oh)) - sum({1(sh[0], sh[1], t)} .* log(Ot))
		end
		# L1 loss
		lr <- relu1(We) + relu1(Wr) + relu1(Deh) + relu1(Der) + relu1(Det) + relu1(Drt)
		update all parameters with l + alpha * lr
	end
Endloop

This snippet took 0.01 seconds to highlight.

Back to the Entry List or Home.

Delete this entry (admin only).