#!/usr/bin/env python
# Probe for the pypar MPI bindings.  The import is currently disabled
# (commented out below), so the except branch always runs and the daemon
# falls back to single-process mode.
try:
	#import pypar
	proc_num = pypar.size()			# total number of MPI processes
	myid = pypar.rank()			# rank of this process
	node = pypar.get_processor_name()
except:
	# pypar unavailable (or import disabled): run as a single process.
	proc_num = 1
	myid = 0
	node = ''
master_id = 0	# rank of the master (dispatcher) process

if proc_num > 1: run_on_mp = 1
else: run_on_mp = 0

#run_on_mp = 1

#based on Jurgen Hermanns http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
import os, sys, fcntl, grp
import cPickle, time, cStringIO
from tools import getConnectionCursor, inquireDB
from db_vars import *

from socket import socket, AF_INET, SOCK_DGRAM #*
from threading import *
import traceback

def myout(s): pass	# no-op sink used to silence rpy's R console output
# Probe whether rpy is usable.  The actual import is commented out, so the
# try body always succeeds and RUN_BY_RPY ends up True regardless.
try:
	#from rpy import *
	#set_rpy_output(myout)
	RUN_BY_RPY = True
except: 
	RUN_BY_RPY = False
#RUN_BY_RPY = False

#HOST = ''
PORT = 1970		# UDP port the daemon listens on
BUFSIZ = 1024		# UDP receive buffer size
GRP_NAME = 'apache'	# group the daemon switches to in preMain()
LOG_NAME = 'analyze_d.log'
PID_NAME = 'analyze_d.pid'
# Optional per-host overrides: a DAEMON_HOST file next to this script is
# exec'd and may redefine any of the settings above (e.g. PORT).
if os.path.exists(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'DAEMON_HOST')):
	exec open(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'DAEMON_HOST')).read()

ADDR_SER = ('', PORT) #(HOST, PORT)
ADDR_CLI = ('localhost', PORT)
LOGFILE = os.path.join(os.path.split(os.path.realpath(__file__))[0], LOG_NAME) #'/var/log/analyze_d.log'
PIDFILE = os.path.join(os.path.split(os.path.realpath(__file__))[0], PID_NAME)
#PIDFILE = '/var/tmp/analyze_d.pid'
LOCKFD = None	# fd of the locked PID file; set by lockPID()/lockPIDFile()
GRP_ID = grp.getgrnam(GRP_NAME)[2]	# numeric gid for GRP_NAME


# --- shared state used by the daemon threads ---
thread_event = Event()		# signaled when new requests may be waiting
thread_lock = RLock()		# guards __request_quit
__request_quit = 0		# set by setQuit() to ask the Analyzer loop to stop
__checked_quit = Event()	# handshake: set once toQuit() has seen the flag

procs_state = dict(zip(range(proc_num), [1]*proc_num)) # {procNo:1, ...}, 1 means available, 0 not available
procs_lock = RLock()		# guards procs_state
procs_event = Event()		# signaled when a process becomes available

reqs_treating = {}		# {req_id: 1 while being handled, 0 once done}
reqs_lock = RLock()		# guards reqs_treating


# UDP protocol messages exchanged between CGI clients and this daemon.
REQUESTS = {'analysis':'New requests', 'quit':'Please quit', 'ask_identity':'Pease tell me your name', 'identity':'This is analyze_d', 'ready_to_quit':'Ready to quit'}


argvs = sys.argv[1:]	# command-line arguments (quit/stop/restart)

start_dir = os.getcwd() # or os.path.split(os.path.abspath(__file__))
work_dir = os.path.split(os.path.realpath(__file__))[0]
#R_code_dir = os.path.abspath('R_code') # os.path.realpath('R_code') # under cgi-bin
R_code_dir = os.path.join(work_dir, 'R_code') #os.path.abspath('R_code') # os.path.realpath('R_code') # under cgi-bin

error_msg = []
os.chdir(R_code_dir)

def preRun():
	"""Warm up R by sourcing LinearDual.R once (the first run may fail,
	per the note below); currently not called."""
	from rpy import r, set_rpy_output
	set_rpy_output(myout)	# silence R console output
	r("source('LinearDual.R')")
	
#if RUN_BY_RPY:
#	try:
#		preRun() #r("source('LinearDual.R')") # The first run of R will always fail, I don't why. So do the first run here
#	except:
#		pass #traceback.print_exc()
os.chdir(start_dir)	# return to the original working directory



def runReq_rpy(req_type, req_id, user_params): # now use runReq_rpy_pipe instead!
	"""Run one analysis request in-process through rpy (DEPRECATED).

	Superseded by the pipe-based runners so an R crash cannot take the
	daemon down.  Sources the R script matching req_type (with
	user_params bound into R), then updates the request's DB row to
	STATE_SOLVED, or STATE_ERROR with an HTML error message built from
	the traceback.  Returns the value of the last R expression, or None.
	"""
	from rpy import r, set_rpy_output
	set_rpy_output(myout)	# silence R console output

	print >>sys.stdout, '\n', time.asctime(), '-- req_type:', req_type, '-- req_id:', req_id

	rlt = None
	error_msg = []
	try:
		lcwd = os.getcwd()
		os.chdir(R_code_dir)	# R scripts are sourced from R_code_dir
		#r("rm(list=ls())")
		if req_type in (TYPE_NORM_ANALYSIS, TYPE_LINEAR_DUAL):
			r.assign("user.params", user_params)
			rlt =  r("source('LinearDual.R')")
		elif req_type == TYPE_LINEAR_AFFY:
			r.assign('user.params', user_params)
			rlt = r("source('LinearAffy.R')")
		elif req_type == TYPE_NORMPCA:
			rlt = r("source('normPCA.R')")
			for k,v in user_params.items(): r.assign(k, v)
			rlt = r("fullNormalize(intensity_file, output_dir, chart_dir, plotPCA=plotPCA, plotAF=plotAF, plotAFD=plotAFD, plotHK=plotHK, sig_level=sig_level, ave_num=ave_num)") 
		else:
			rlt = None # do other analysis
		os.chdir(lcwd)
	except: 
		# R failed: restore the cwd and turn the traceback into an HTML
		# error message for the web front end.
		os.chdir(lcwd)
		error_msg.append('Error happened in R: <p>') #might caused by wrong parameters from user.')
		cfile = cStringIO.StringIO()
		traceback.print_exc(None,cfile)
		value = cfile.getvalue()

		#msg2yipeng = "To: ywang@skcc.org\nSubject: WebArray Error Message\n\n%s" % value
		#MAIL = '/usr/sbin/sendmail'
		#p = os.popen('%s -t' % MAIL, 'w')
		#p.write(msg2yipeng)
		#exitcode = p.close()
		
		value = value.replace('\n', '<br>')
		value = value.replace('"', '\'\'')	# avoid breaking the double-quoted SQL string below
		err_tk = 'error: '
		err_st = value.find(err_tk)
		if err_st >= 0: value = value[err_st+len(err_tk):]	# keep only the text after 'error: '
		error_msg.append(value)
	try:
		r("rm(list=ls())")	# clear the R workspace for the next request
	except: pass

	if error_msg:
		error_msg.append('''<p><spacer type='vertical' size=250>
<table><tr><td height=120></td></table>
<hr width=50% align=center>
<center>
<font size=-1>
	Please report bugs to <a href=mailto:xxia@skcc.org>Xiao-Qin Xia</a><br>For questions about analysis, please contact <a href=mailto:ywang@skcc.org>Yipeng Wang</a><br><a href=http://www.skcc.org>Sidney Kimmel Cancer Center</a></font>
</center>
''')
		msg = ' '.join(error_msg)
		req_state = STATE_ERROR
		sql_update = 'UPDATE requests SET req_state=%d, error_msg="%s" WHERE req_id=%d' % (req_state, msg, req_id)
	else: 
		req_state = STATE_SOLVED
		date_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
		sql_update = 'UPDATE requests SET req_state=%d, solve_time="%s" WHERE req_id=%d' % (req_state, date_time, req_id)
	n = inquireDB(sql_update)
	del r, set_rpy_output
	return rlt

def runReq_rpy_pipe(req_type, req_id, user_params):
	"""Run one analysis request through the external 'runrpy' helper.

	DEPRECATED: runReq_pipe (which picks the helper per request type) is
	the active implementation.  The pickled request tuple is written to
	the child's stdin; the child answers with a pickled (result,
	sql_update) pair, and the UPDATE is executed here.  On failure the
	request row is marked STATE_ERROR with an HTML traceback message.
	Returns the child's result, or None on failure.
	"""
	error_msg = []

	print >>sys.stdout, '\n', time.asctime(), '-- req_type:', req_type, '-- req_id:', req_id

	try:
		#pin, pout = os.popen2(os.path.join(os.path.split(os.path.abspath(__file__))[0], 'runrpy'))
		pin, pout = os.popen2(os.path.join(work_dir, 'runrpy'))
		os.write(pin.fileno(), cPickle.dumps((req_type, req_id, user_params, R_code_dir)) )
		pin.close()
		rlt, sql_update = cPickle.loads(pout.read())
		pout.close()
		# for debug
		#sql_update = '''UPDATE requests SET req_state=%d, error_msg="%s" WHERE req_id=%d''' % (1, sql_update, req_id)
	except:
		rlt = None
		
		# Child failed (or pickle/IO error): build an HTML error report.
		error_msg.append('Fatal Error ocurred') #might caused by wrong parameters from user.')
		cfile = cStringIO.StringIO()
		traceback.print_exc(None,cfile)
		value = cfile.getvalue()
		value = value.replace('\n', '<br>')
		value = value.replace('"', '\'\'')	# avoid breaking the double-quoted SQL string below
		err_tk = 'error: '
		err_st = value.find(err_tk)
		if err_st >= 0: value = value[err_st+len(err_tk):]
		error_msg.append(value)
		error_msg.append('''<p><spacer type='vertical' size=250>
<table><tr><td height=120></td></table>
<hr width=50% align=center>
<center>
<font size=-1>
	Please report bugs to <a href=mailto:xxia@skcc.org>Xiao-Qin Xia</a><br>For questions about analysis, please contact <a href=mailto:ywang@skcc.org>Yipeng Wang</a><br><a href=http://www.skcc.org>Sidney Kimmel Cancer Center</a></font>
</center>
''')
		msg = ' '.join(error_msg)
		req_state = STATE_ERROR
		sql_update = 'UPDATE requests SET req_state=%d, error_msg="%s" WHERE req_id=%d' % (req_state, msg, req_id)
		
	n = inquireDB(sql_update)
	return rlt

def runReq_pipe(req_type, req_id, user_params):
	"""Run one analysis request in a child process chosen by request type.

	The helper program TYPE_CMDS[req_type] (in work_dir) is started via
	os.popen2; the pickled (req_type, req_id, user_params) tuple goes to
	its stdin and it replies with a pickled (result, sql_update) pair.
	On any failure an HTML error message is built from the traceback and
	the request row is marked STATE_ERROR.  The UPDATE statement is then
	executed against the requests table.  Returns the child's result (or
	the error text on failure).  This is the active implementation bound
	to runReq below.
	"""
	error_msg = []

	print >>sys.stdout, '\n\n\n'+'-'*60, '\n[', time.asctime(), ']   -- req_type:', req_type, '-- req_id:', req_id, '\n'+'-'*60+'\n'

	try:
		#pin, pout = os.popen2(os.path.join(os.path.split(os.path.abspath(__file__))[0], 'runrpy'))
		#pin, pout = os.popen2(os.path.join(work_dir, 'runR'))
		pin, pout = os.popen2(os.path.join(work_dir, TYPE_CMDS[req_type]))
		#os.write(pin.fileno(), cPickle.dumps((req_type, req_id, user_params, R_code_dir)) )
		os.write(pin.fileno(), cPickle.dumps((req_type, req_id, user_params)) )
		pin.close()
		rlt, sql_update = cPickle.loads(pout.read())
		pout.close()
		# for debug
		#sql_update = '''UPDATE requests SET req_state=%d, error_msg="%s" WHERE req_id=%d''' % (1, sql_update, req_id)
	except:
		# Child failed (or pickle/IO error): build an HTML error report
		# from the traceback for display by the web front end.
		error_msg.append('Fatal Error ocurred') #might caused by wrong parameters from user.')
		cfile = cStringIO.StringIO()
		traceback.print_exc(None,cfile)
		value = cfile.getvalue()
		value = value.replace('\n', '<br>')
		value = value.replace('"', '\'\'')	# avoid breaking the double-quoted SQL string below
		err_tk = 'error: '
		err_st = value.find(err_tk)
		if err_st >= 0: value = value[err_st+len(err_tk):]
		error_msg.append(value)
		error_msg.append('''<p><spacer type='vertical' size=250>
<table><tr><td height=120></td></table>
<hr width=50% align=center>
<center>
<font size=-1>
	Please report bugs to <a href=mailto:xxia@skcc.org>Xiao-Qin Xia</a><br>For questions about analysis, please contact <a href=mailto:ywang@skcc.org>Yipeng Wang</a><br><a href=http://www.skcc.org>Sidney Kimmel Cancer Center</a></font>
</center>
''')
		rlt = msg = ' '.join(error_msg)
		req_state = STATE_ERROR
		sql_update = 'UPDATE requests SET req_state=%d, error_msg="%s" WHERE req_id=%d' % (req_state, msg, req_id)
		
	print >>sys.stdout, sql_update
	print >>sys.stdout, rlt
	n = inquireDB(sql_update)
	return rlt

# Select the active implementation: analyses run in a child process via a
# pipe, so a crash in R cannot take the daemon down.
#runReq = runReq_rpy
#runReq = runReq_rpy_pipe
runReq = runReq_pipe


def nextProc():
	"""Return the id of an available process, blocking until one frees up.

	Workers are preferred; the master id is returned only when no worker
	is idle.  When nothing is available the procs_lock is released and
	the thread sleeps on procs_event (set by startProc.run when a
	process finishes), then re-checks.
	"""
	procs_lock.acquire()
	ps = procs_state.keys()
	#ps.sort()
	#ps.reverse()
	ps.remove(master_id)	# consider workers first
	not_found = 1
	while not_found:
		for k in ps:
			if procs_state[k]:	# 1 == available
				not_found = 0
				break
		if not_found:
			if procs_state[master_id]:	# fall back to the master itself
				k = master_id
				not_found = 0
				
		if not_found:
			# Nobody idle: drop the lock, wait for a completion
			# notification, then re-take the lock and re-scan.
			procs_lock.release()
			procs_event.clear()
			procs_event.wait()
			procs_lock.acquire()	
	procs_lock.release()
	
	return k

class startProc(Thread): # this thread object send a job to a related Proc
	"""One thread per dispatched job: forwards the request to process
	`proc_id` via pypar (or runs it locally when proc_id is the master),
	stores the result, then marks the request done and the process
	available again."""
	def __init__(self, proc_id, req_type='stop', req_id=0, user_params={}):
		# NOTE(review): the mutable {} default is shared between calls,
		# but user_params is never mutated here, so it is harmless.
		Thread.__init__(self)
		self.myid = proc_id	# target process rank (not this daemon's rank)
		self.req_type = req_type
		self.req_id = req_id
		self.user_params = user_params
		self.value = None	# job result; 'failed' when the proc returned None
		self.value_lock = RLock()

	def params(self, req_type='stop', req_id=0, user_params={}):
		"""Re-arm this object with a new request (unused by current callers)."""
		self.req_type = req_type
		self.req_id = req_id
		self.user_params = user_params

	def run(self):
		self.value_lock.acquire()
		self.value = None
		self.value_lock.release()
		if self.req_type != 'stop': 
			if self.myid == master_id:
				value = self.masterProc()	# run locally in this process
			else:
				# Hand the job to the worker and block for its answer.
				pypar.send((self.req_type, self.req_id, self.user_params), self.myid)
				#if myid == master_id: job_event.set()
				value = pypar.receive(self.myid)
				
			self.value_lock.acquire()
			if value is not None:
				self.value = value
			else: self.value = 'failed'
			self.value_lock.release()

		reqs_lock.acquire()
		reqs_treating[self.req_id] = 0 #'completed'
		reqs_lock.release()
		
		procs_lock.acquire()
		procs_state[self.myid] = 1	# process is available again
		procs_lock.release()

		procs_event.set()	# wake any nextProc() waiters

	def masterProc(self):
		"""Run the request in this (master) process via runReq."""
		#exec open(os.path.join(work_dir, 'db')).read()
		#connection, cursor = getConnectionCursor()
		lcwd = os.getcwd()
		#go_on = 1
		#while go_on:
			#if myid == master_id: job_event.wait()
		rlt = runReq(self.req_type, self.req_id, self.user_params)
		
		#cursor.close()
		#connection.close()
		return rlt

		
def stopProcs():
	"""Broadcast a 'stop' message to every known process via pypar so
	the worker doJob() loops (and the master's receiver) terminate."""
	procs_lock.acquire()
	proc_ids = procs_state.keys()
	procs_lock.release()
	for proc_id in proc_ids:
		pypar.send(('stop', 0, {}), proc_id)
			


#job_event = Event() # used for communication between startProc and doJob in master_id proc.
#job_event.clear()
def doJob():
	"""Worker-process main loop: receive (req_type, req_id, user_params)
	tuples from the master via pypar, run them with runReq, and send the
	result back.  A 'stop' request ends the loop."""
	#connection, cursor = getConnectionCursor()
	go_on = 1
	while go_on:
		#if myid == master_id: job_event.wait()
		req_type, req_id, user_params = pypar.receive(source = master_id)
		if req_type == 'stop': break
		rlt = runReq(req_type, req_id, user_params)
		pypar.send(rlt, master_id)

	#cursor.close()
	#connection.close()

def setQuit(q = 1):
	"""Ask the Analyzer loop to quit (q=1) and block until it has seen
	the flag (handshake through __checked_quit, set by toQuit())."""
	global __request_quit
	thread_lock.acquire()
	__request_quit = q
	__checked_quit.clear()
	thread_lock.release()
	thread_event.set()	# wake the Analyzer so it re-checks toQuit()
	
	__checked_quit.wait()	# wait for the acknowledgement
	

def toQuit():
	"""Return the current quit flag and acknowledge it to setQuit()."""
	thread_lock.acquire()
	a = __request_quit
	__checked_quit.set()	# release any thread blocked in setQuit()
	thread_lock.release()
	return a

def sendQuit():
	"""Ask the daemon listening on localhost to shut down and wait for
	its 'ready_to_quit' acknowledgement."""
	sock = socket(AF_INET, SOCK_DGRAM)
	sock.sendto(REQUESTS['quit'], ADDR_CLI)
	reply, sender = sock.recvfrom(BUFSIZ)	# block until acknowledged
	sock.close()

def findMe():
	"""Check whether an analyze_d daemon is already listening on ADDR_CLI.

	Sends the identity query over UDP and waits up to one second for the
	expected reply.  Returns 1 when a daemon answered with the correct
	identity string, 0 otherwise (timeout or socket error).

	Fixes over the original: the bare `except:` (which also swallowed
	KeyboardInterrupt/SystemExit) is narrowed to socket errors, and the
	socket is always closed via try/finally.
	"""
	import socket as _socket	# module object; the bare name 'socket' is the class here
	udpCliSock = socket(AF_INET, SOCK_DGRAM)
	isThere = 0
	try:
		udpCliSock.sendto(REQUESTS['ask_identity'], ADDR_CLI)
		udpCliSock.settimeout(1) #udpCliSock.setblocking(0)
		data, addr = udpCliSock.recvfrom(BUFSIZ)
		#print data, addr
		if data == REQUESTS['identity']: isThere = 1
	except (_socket.timeout, _socket.error):
		pass	# nobody answered -- no daemon running
	finally:
		udpCliSock.close()
	return isThere

class Log:
	"""File-like wrapper for logging: flushes after every write so that
	nothing is lost on an unexpected exit, and rotates the log once it
	exceeds MAX_SIZE, keeping a single '_1' backup file."""
	MAX_SIZE = 1000000
	def __init__(self, f):
		self.f = f
		self.name = f.name
		# Derive the backup name: insert '_1' before the last extension,
		# or just append '_1' when the basename has no dot.
		if '.' in os.path.basename(self.name):
			dot = self.name.rfind('.')
			self.back_name = '%s_1%s' % (self.name[:dot], self.name[dot:])
		else:
			self.back_name = self.name + '_1'
		self.limitSize()
	def write(self, s):
		self.limitSize()
		self.f.write(s)
		self.f.flush()
	def limitSize(self):
		# Rotate when the file has grown past MAX_SIZE: replace the old
		# backup with the current file and start a fresh one.
		self.f.seek(0, 2)
		if self.f.tell() > self.MAX_SIZE:
			self.f.close()
			if os.path.exists(self.back_name):
				os.remove(self.back_name)
			os.rename(self.name, self.back_name)
			self.f = open(self.name, 'a+')

class MyTimer(Thread):
	"""Periodically sets thread_event so the Analyzer polls the database
	even when no UDP notification arrives (default period: 60 s)."""
	interval = 60.00	# seconds between wakeups
	def __init__(self, interval=None):
		Thread.__init__(self)
		self.event = Event()	# fires once per period (or on quit)
		self.rlock = RLock()	# guards __quit
		self.__quit = 0
		if interval is not None: self.interval = interval

	def job(self):
		# Timer callback: wake our own loop and the Analyzer loop.
		self.event.set()
		thread_event.set()

	def quit(self):
		"""Stop the timer loop; safe to call from another thread."""
		self.rlock.acquire()
		self.__quit = 1
		if hasattr(self, 'timer'): self.timer.cancel()
		self.event.set()	# unblock run() so it can exit
		self.rlock.release()

	def isRunning(self):
		# True until quit() has been called.
		self.rlock.acquire()
		go_on = not self.__quit
		self.rlock.release()
		return go_on
	
	def run(self):
		# Re-arm a one-shot threading.Timer each period until quit().
		while self.isRunning():
			#self.job()
			self.timer = Timer(self.interval, self.job)
			self.timer.start()
			#self.rlock.acquire()
			self.event.clear()
			#self.rlock.release()
			self.event.wait()

		#while 1:
		#	thread_event.set()
		#	if toQuit(): break
		#	time.sleep(1) # sleep 60 seconds

# Module-wide timer instance; started and stopped by Analyzer.analyze().
my_timer = MyTimer()

class Connector(Thread):
	"""UDP control server: listens on ADDR_SER and reacts to the protocol
	messages in REQUESTS (new-analysis wakeup, identity query, quit)."""
	def __init__(self):
		Thread.__init__(self)

	def run(self):
		udpSerSock = socket(AF_INET, SOCK_DGRAM)
		udpSerSock.bind(ADDR_SER)
		reqs = REQUESTS #{'analysis':'New requests', 'quit':'Please quit', 'identity':'Pease tell me your name'}
		
		while 1:
			data, addr = udpSerSock.recvfrom(BUFSIZ)
			if data == reqs['analysis']: thread_event.set() # requests from web CGI
			elif data == reqs['quit']: 
				# setQuit() blocks until the Analyzer acknowledges; only
				# then is the shutdown confirmed back to the client.
				setQuit(); 
				udpSerSock.sendto(reqs['ready_to_quit'], addr)
				break
			elif data == reqs['ask_identity']:
				udpSerSock.sendto(reqs['identity'], addr)
		udpSerSock.close()


class Analyzer(Thread):
	"""Main dispatch loop: polls the requests table for STATE_WAITING rows
	and runs them -- directly in single-process mode, or by farming them
	out to pypar processes via startProc in multi-process mode."""
	def __init__(self):
		Thread.__init__(self)

	def run(self):
		self.analyze()
		
	def analyze(self):
		"""Loop until toQuit() is true, handling one new request per pass
		so state changes (e.g. a quit request) are noticed promptly."""
		#connection, cursor = getConnectionCursor()
		sql_request = 'SELECT category, req_info, req_id FROM requests WHERE req_state=%d ORDER BY req_id' % STATE_WAITING
		
		my_timer.start()	# periodic wakeups so the DB is polled even without UDP pings
		if not run_on_mp:
			while not toQuit():
				rlt = inquireDB(sql_request, fetch=True)
				if len(rlt) < 1:
					# Nothing waiting: sleep until the timer or a UDP
					# notification sets thread_event.
					thread_event.clear()
					thread_event.wait() # use Event().wait() here
					continue
				for k in rlt: #cursor.fetchall():
					req_type, user_params, req_id = k
					user_params = cPickle.loads(user_params)	# NOTE(review): pickle from our own CGI; assumed trusted
					runReq(req_type, req_id, user_params)
					break # only do one computation for each inquiry. So it will react changes in time!
		else:
			while not toQuit():
				reqs_lock.acquire()
				# Drop entries whose startProc thread has finished.
				for k,v in reqs_treating.items():
					if v == 0: del reqs_treating[k] # completed
				#reqs_lock.release()
				new_reqs = []
				rlt = inquireDB(sql_request, fetch=True)
				n = len(rlt) #cursor.execute(sql_request)
				if n > 0:
					#reqs_lock.acquire()	
					for i in rlt: #cursor.fetchall():
						if not reqs_treating.has_key(i[2]): new_reqs.append(i)
				for k in new_reqs: 
					reqs_treating[k[2]] = 1	# mark in progress
					break # only do one computation for each inquiry. So it will react changes in time!
				reqs_lock.release()
					
				if not new_reqs:
					thread_event.clear()
					thread_event.wait() # use Event().wait() here
					continue
				for k in new_reqs:
					req_type, user_params, req_id = k
					user_params = cPickle.loads(user_params)

					proc_id = nextProc()	# may block until a process is idle
					procs_lock.acquire()
					procs_state[proc_id] = 0	# mark busy
					procs_lock.release()
		
					startProc(proc_id, req_type=req_type, req_id=req_id, user_params=user_params).start()
					break # only do one computation for each inquiry. So it will react changes in time!
			stopProcs()	# tell all pypar processes to exit their loops

		#cursor.close()
		#connection.close()
		my_timer.quit()

def analyze():
	"""Start the control threads: the UDP Connector in the background,
	and the Analyzer loop (threaded in MP mode, inline otherwise)."""
	
	#t = MyTimer()
	#t.start()
	Connector().start()
	if run_on_mp:
		#jb = jobThread()
		#jb.start()
		Analyzer().start()
		#t.quit()
		#stopProcs()
	else:
		Analyzer().analyze()	# runs in this thread; returns on quit
		#t.quit()
	
	
def main():
	"""Daemon entry point after daemonization: redirect stdout/stderr to
	the rotating log, then run the master dispatcher (and release the PID
	lock on shutdown) or a pypar worker loop."""
	#change to data directory if needed
	#os.chdir("/root/data")

	#redirect outputs to a logfile
	sys.stdout = sys.stderr = Log(open(LOGFILE, 'a+'))
	#ensure the that the daemon runs a normal user
	#os.setegid(48)	 #set group first "pydaemon"
	#os.seteuid(48)	 #set user "pydaemon"
	
	#start the user program here:

	if myid == master_id:
		analyze()
		unlockPID()	# release the PID-file lock on clean shutdown
	else:
		doJob()
	


import struct # used to pack the flock structure
import fcntl # needed for file locking and other file operations
#import fcntlh # values used by fcntl. created using h2py from
# /usr/include/sys/fcntl.h

def writelock( f ):
	"""Place a blocking, whole-file write (exclusive) POSIX lock on f.

	Returns 1 on success, 0 on failure.  Fix: fcntl.fcntl() signals
	failure by raising IOError, never by returning -1, so the original
	`!= -1` comparison could not detect errors -- catch instead.
	"""
	fd = f.fileno()
	flock = struct.pack('2h8l', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0, 0, 0, 0)
	try:
		fcntl.fcntl(fd, fcntl.F_SETLKW, flock)
		return 1
	except IOError:
		return 0

def readlock( f ):
	"""Place a blocking, whole-file read (shared) POSIX lock on f.

	Returns 1 on success, 0 on failure.  Fix: fcntl.fcntl() signals
	failure by raising IOError, never by returning -1, so the original
	`!= -1` comparison could not detect errors -- catch instead.
	"""
	fd = f.fileno()
	flock = struct.pack('2h8l', fcntl.F_RDLCK, 0, 0, 0, 0, 0, 0, 0, 0, 0)
	try:
		fcntl.fcntl(fd, fcntl.F_SETLKW, flock)
		return 1
	except IOError:
		return 0

def unlock( f ):
	"""Remove any POSIX lock this process holds on the whole of f.

	Returns 1 on success, 0 on failure.  Fix: fcntl.fcntl() signals
	failure by raising IOError, never by returning -1, so the original
	`!= -1` comparison could not detect errors -- catch instead.
	"""
	fd = f.fileno()
	flock = struct.pack('2h8l', fcntl.F_UNLCK, 0, 0, 0, 0, 0, 0, 0, 0, 0)
	try:
		fcntl.fcntl(fd, fcntl.F_SETLKW, flock)
		return 1
	except IOError:
		return 0



def lockPIDFile():
    """Create PIDFILE exclusively and write our PID into it.

    Returns 1 when the lock was acquired, 0 when the file already exists
    (another instance holds it).  Fixes over the original: the global
    statement named `lockfd` (so the module-level LOCKFD was never set),
    and the PID was written to the stale module LOCKFD (None -> TypeError)
    instead of the freshly opened descriptor.
    """
    global LOCKFD
    try:
        LOCKFD = os.open(PIDFILE, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        os.write(LOCKFD, "%d" % os.getpid())
        return 1
    except OSError: # Already locked
        LOCKFD = None
        return 0

def unlockPIDFile():
    """Release the lock taken by lockPIDFile(): close the fd and delete
    PIDFILE.  Returns 1 on success, 0 when nothing was locked or on error.

    Fixes over the original: `if not LOCKFD` wrongly treated the valid
    descriptor 0 as "not locked" (use an explicit None check), and LOCKFD
    is now reset so a second call cannot close a stale descriptor.
    """
    global LOCKFD
    if LOCKFD is None:
        return 0
    try:
        os.close(LOCKFD)
        os.remove(PIDFILE)
        return 1
    except OSError:
        return 0
    finally:
        LOCKFD = None

#if __name__ == "__main__":
def preMain(): #pass
	"""Handle the command line (quit/stop/restart), then daemonize via the
	classic UNIX double fork, switch group, detach, and take the PID lock."""
	is_running = findMe() # now use lockPID()
	if argvs:
		arg = argvs[0].lower()
		if arg == 'quit' or arg == 'stop':
			if myid == master_id and is_running:
				sendQuit()
			sys.exit(0)
		elif arg == 'restart':
			if myid == master_id and is_running:
				sendQuit()
			time.sleep(1)	# give the old daemon a moment to shut down
		else:
			print 'Wrong arguments'
			sys.exit(0)
	elif False: #is_running: # now use lockPID()
		print 'Already run!\n'
		sys.exit(0)



	# do the UNIX double-fork magic, see Stevens' "Advanced
	# Programming in the UNIX Environment" for details (ISBN 0201563177)
	try:
		pid = os.fork()
		if pid > 0:
			# exit first parent
			sys.exit(0)
	except OSError, e:
		print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
		sys.exit(1)

	# decouple from parent environment
	#if not os.getuid(): # 0 is root
	try:
		os.setgid(GRP_ID)	# join the web server's group; may fail when not root
	except: pass
	os.chdir("/")   #don't prevent unmounting....
	os.setsid()
	# set mask for R produced files
	#os.umask(0)
	os.umask(006)

	# do second fork
	try:
		pid = os.fork()
		if pid > 0:
			# exit from second parent, print eventual PID before
			#print "Daemon PID %d" % pid

			#open(PIDFILE,'w').write("%d"%pid)
			sys.exit(0)
	except OSError, e:
		print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
		sys.exit(1)

	lockPID()	# exits if another instance already holds the lock
	#print lockPIDFile()

# def lockPid():
	#fpid = open(PIDFILE, 'w')
	#fn = fpid.fileno()
	#import struct
	#if not fpid: sys.exit(0)
	#fpid.write('%d' % os.getpid())

	##rv = fcntl.flock(fn, fcntl.LOCK_EX)
	##if not rv: fpid.write('%d' % os.getpid())
	##else: sys.exit(1)

	##lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
	##rv = fcntl.fcntl(fpid, fcntl.F_SETLKW, lockdata)
	##if rv == -1: print -1; sys.exit(1)
	##else: fpid.write('%d' % os.getpid())

	#if writelock(fpid): fpid.write('%d' % os.getpid())
	#else: sys.exit(1)
	

def lockPID():
	global LOCKFD
	fn = os.open(PIDFILE, os.O_WRONLY | os.O_CREAT) #, FILE_MODE)	
	#import struct
	#lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
	#rv = fcntl.fcntl(fn, fcntl.F_SETLK, lockdata)
	try:
		LOCKFD = fcntl.flock(fn, fcntl.LOCK_EX | fcntl.LOCK_NB)
	except IOError: 
		print 'Another copy of %s has already run!' % os.path.split(__file__)[1]
		sys.exit(0)
	os.ftruncate(fn, 0)
	#if os.ftruncate(fn, 0) < 0:
	#	print 'ftruncate error'
	#	sys.exit(1)
	mypid = '%d\n' % os.getpid()
	if os.write(fn, mypid) != len(mypid):
		print 'write error'
		sys.exit(1)
	val = fcntl.fcntl(fn, fcntl.F_GETFD, 0)
	if val < 0:
		print 'fcntl F_GETFD error'
		sys.exit(1)
	val |= fcntl.FD_CLOEXEC
	if fcntl.fcntl(fn, fcntl.F_SETFD, val) < 0:
		print('fcntl F_SETFD error')
		sys.exit(1)
		
def unlockPID():
	"""Release the PID lock taken by lockPID(): close the descriptor and
	remove PIDFILE.  Returns 1 on success, 0 otherwise.

	Fix: guard against LOCKFD being None (os.close(None) raised an
	uncaught TypeError in the original) and reset LOCKFD afterwards so a
	second call is a harmless no-op.
	"""
	global LOCKFD
	if LOCKFD is None:
		return 0
	try:
		os.close(LOCKFD)
		os.remove(PIDFILE)
		return 1
	except OSError:
		return 0
	finally:
		LOCKFD = None
	

if __name__ == "__main__":
	preMain()	# parse args, daemonize, and take the PID lock
	# start the daemon main loop
	main()

	#os.close(fn)
	

