#!/usr/bin/env python

import os, sys, time, cPickle, traceback, cStringIO, re, sets
from db_vars import *

from tools import *
#from toolfunc import *
from pipeR import *
from finishJob import convertImg, outputZip

# Debug switch: when True, the temporary R parameter file is copied aside
# and the R side is told to run in debug mode.
__DEBUG__ = False

# Module-level accumulator of HTML error fragments; filled by runPipe()/Main()
# and flushed into the requests table's error_msg column.
error_msg = []
#R_halt_str = re.compile(r'\nExecution halted\n*\Z')
# Heading prepended to the joined error_msg before it is stored.
error_head = '<h1>Error Message</h1><br><p>'

#PIPE_CMD = 'python filldbs.py --ACCEPT_DATA_FROM_PIPE'

#def htmlStr(s): return s.replace('\n','<br>').replace('"', "''")

def runPipe(DEBUG=False):
	'''
	Entry point of the analysis worker.

	Reads a pickled (req_type, req_id, user_params) triple from stdin (or,
	when DEBUG, re-runs the newest row of the "requests" table), runs
	Main(), and writes a pickled (result, sql_update) pair back to stdout.
	On failure the module-level error_msg list is filled and an UPDATE
	statement marking the request as failed is emitted instead.
	'''
	if DEBUG:
		req_id = inquireDB('SELECT MAX(req_id) FROM requests', fetch=True)[0][0]
		req_type, user_params = inquireDB('SELECT category, req_info FROM requests WHERE req_id=%d' % req_id, fetch=True)[0]
		user_params = cPickle.loads(user_params)
	else: req_type, req_id, user_params = cPickle.loads( sys.stdin.read() )

	savePid(req_id, append=False)

	result_dir = user_params['result_dir']
	req_name = user_params.get('req_name', None)
	graph_dir = os.path.join(result_dir, 'chart')
	# charts are always rendered as .png; .eps is added when a PDF was requested
	obj = user_params.get('plot_pdf', False) and ['.png', '.eps'] or '.png'
	rlt = None
	succeeded = False
	try: 
		rlt = Main(req_type, req_id, user_params)
		if type(rlt) is tuple: # runR_pipe return a tuple: (rlt, sql_update)
			# convert images and pack the results for download
			convertImg(graph_dir, src='.pdf', obj=obj)
			outputZip(result_dir, req_id=req_name, req_type=req_type)

			sys.stdout.write( cPickle.dumps(rlt) )
			sys.stdout.close()
			succeeded = True

	except: 
		if type(rlt) is tuple: rlt = rlt[0]
		if rlt: 
			# BUGFIX: was error_msg.extend(htmlStr(rlt)), which extends the
			# list one CHARACTER at a time; append keeps the message whole.
			error_msg.append(htmlStr(rlt))
			rlt = None
		error_msg.append('<p>Error happened: <p>') #might caused by wrong parameters from user.')
		cfile = cStringIO.StringIO()
		traceback.print_exc(None,cfile)
		value = htmlStr(cfile.getvalue())

		# keep only the text after the first "error: " token of the traceback
		err_tk = 'error: '
		err_st = value.find(err_tk)
		if err_st >= 0: value = value[err_st+len(err_tk):]
		error_msg.append(value)
	if succeeded: return

	if rlt and rlt[-14:] == 'Error ocurred!': 
		error_msg.append(htmlStr(rlt))

	# even on failure, convert whatever charts exist and pack the results
	convertImg(graph_dir, src='.pdf', obj=obj)
	outputZip(result_dir, req_id=req_name, req_type=req_type)

	date_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
	if error_msg:
		error_msg.insert(0, error_head)
		error_msg.append(bug_report_fmt)
		msg = esc_sql_1.sub(r'\\\1', ' '.join(error_msg))
		req_state = STATE_ERROR
		sql_update = 'UPDATE requests SET req_state=%d, error_msg="%s", solve_time="%s" WHERE req_id=%d' % (req_state, msg, date_time, req_id)
		if DEBUG: return error_msg
	else: 
		if DEBUG: return rlt
		req_state = STATE_SOLVED
		if rlt: sql_update = 'UPDATE requests SET req_state=%d, error_msg="%s", solve_time="%s" WHERE req_id=%d' % (req_state, esc_sql_1.sub(r'\\\1', htmlStr(rlt)), date_time, req_id)
		else: sql_update = 'UPDATE requests SET req_state=%d, solve_time="%s" WHERE req_id=%d' % (req_state, date_time, req_id)

	sys.stdout.write( cPickle.dumps( (rlt, sql_update) ) )
	sys.stdout.close()

def KWDict(cur, pfid):
	'''for given platform (pfid), get a dict with items like dbnm_kw:[probe_ids]'''
	# NOTE(review): unimplemented stub -- the body was never written, so the
	# function always returns None; no caller appears in this file.

def mkGrpByKw(kw, grpdic, grpset, kwset_dic, kwdic, kw_used):
	'''
	Recursively pull every matched line reachable from keyword "kw" into one group.

	kw: a dbkw string
	grpdic: {(dbnm, line_No):newgrp, ...} -- line -> its group (updated in place)
	grpset: sets.Set([(dbnm, line_No), ...]) -- lines already grouped
	kwset_dic: {dbnm:[kws_set, ...], ... } -- keywords carried by each line
	kwdic: {dbkw:[(dbnm, line_No), ...], ... } -- lines carrying each keyword
	kw_used: {dbkw:True, ... } -- keywords already expanded
	'''
	if (kw in kw_used) or not kw: return #discard null dbkws
	kw_used[kw] = True

	# find the (dbnm, line_No) pairs of this keyword that are not grouped yet
	dbnm_lns = sets.Set(kwdic[kw])
	dif = dbnm_lns - grpset
	if not dif: return

	# reuse the group of any already-grouped line, else start a new one
	com = dbnm_lns & grpset # grpset.intersection(dbnmlns)
	if com: grp = grpdic[com.pop()]
	else: grp = sets.Set()
	grpset.update(dif) # update grpset
	for a in dif: grpdic[a] = grp # update grpdic

	# add the new lines and recurse through their other keywords, so the
	# group becomes the transitive closure over shared keywords
	for dbnm_ln in dif: #kwdic[kw]:
		grp.add(dbnm_ln)
		dbnm, ln_No = dbnm_ln
		for k in kwset_dic[dbnm][ln_No]: 
			mkGrpByKw(k, grpdic, grpset, kwset_dic, kwdic, kw_used)

def LinesAcrossDBs(kwset_dic, probe_dic):
	'''
	Match lines across databases through their shared keywords.

	kwset_dic: {dbnm:[dbkws_set, ...], ... } -- keywords of each line
	probe_dic: {dbnm:{pf_id:[pbid_set, ...], ...}, ...}
	Returns (xpf_names, pbdic) where pbdic has the same shape as probe_dic
	but one entry per cross-database keyword group.

	BUGFIX: the previous version referenced an undefined name "pfs_dic"
	(NameError), used a list of tuples as a dict key when reversing
	dbkw_dic (TypeError), and put mutable sets.Set groups into a Set
	(py2 sets.Set is not hashable).  The platform layout is now derived
	from probe_dic and groups are de-duplicated by identity.
	'''
	# make dbkw_dic: {dbkw:[(dbnm, line_No), ...], ... }
	dbkw_dic = {}
	for dbnm, kws_sets in kwset_dic.items():
		for ln in range(len(kws_sets)):
			for kw in kws_sets[ln]:
				dbkw_dic.setdefault(kw, []).append((dbnm, ln))

	# make grpdic: {(dbnm, line_No):group, ...}
	grpdic, grpset, kw_used = {}, sets.Set(), {}
	for kw in dbkw_dic.keys():
		mkGrpByKw(kw, grpdic, grpset, kwset_dic, dbkw_dic, kw_used)

	# de-duplicate the shared group objects by identity (Sets are unhashable)
	grps, seen = [], {}
	for g in grpdic.values():
		if id(g) not in seen:
			seen[id(g)] = True
			grps.append(g)

	# validate grps: only groups spanning ALL databases are kept
	n_dbs = len(probe_dic)
	grps = filter(lambda g: n_dbs == len(sets.Set(map(lambda b: b[0], g))), grps)

	# merge lines by grps: build a new structure shaped like probe_dic
	pbdic = {}
	for dbnm, pfdic in probe_dic.items():
		pbdic[dbnm] = tmp = {}
		for pfid in pfdic.keys():
			tmp[pfid] = []

	# make dbnmln_dic: {(dbnm, ln):dbkw, ...}, a reverse of dbkw_dic
	# (a line with several keywords keeps the last one seen as its name)
	dbnmln_dic = {}
	for dbkw, dbnmlns in dbkw_dic.items():
		for dbnmln in dbnmlns:
			dbnmln_dic[dbnmln] = dbkw

	xpf_names = []
	for grp in grps:
		# add a fresh line in every platform of pbdic
		for pfids in pbdic.values():
			for pbid_sets in pfids.values():
				pbid_sets.append(sets.Set())
		# fill the line with the union of the group's probe ids
		for dbnm, i in grp: # each dbnm
			for pfid, pbids in pbdic[dbnm].items(): # each pf_id
				pbids[-1].update(probe_dic[dbnm][pfid][i]) # last line
		# name the row after a representative keyword of the group
		xpf_names.append(dbnmln_dic[(dbnm, i)].replace('::', '_') )

	return xpf_names, pbdic


def	getCommonLines(cur, pfs_dic, by_col=None, mapfn=None, map_db_pf_col={}): #, merge='logmean'):
	'''
	Dispatch to the proper probe-alignment strategy and return the matched lines.

	pfs_dic is a dict like {dbname:[pf_ids]}
	by_col: 'automatic' (mapf_id/xpf_id), 'probe-mapping file',
		'user-specified columns', a probe column ('gene_symbol'/'unique_id'/'idx'),
		or an external-db name stored in dbkw.dbname
	mapfn is the name of a probe-mapping file: 'dbnm_fileID'
	map_db_pf_col is a dict: {dbnm:{pfid:colname, ...}, ...}
	return (xpf_names, {dbname:{pf_id:[probe_id_set, ...], ...}, ...}),
	None when nothing matches, or an error string for the user.
	'''
	
	# set bigger buffer for group_concat (default is 1024)
	cur.execute('SET @@group_concat_max_len=8192') 
	cur.connection.commit()

	if by_col == 'automatic': # by mapf_id or xpf_id
		rlt = getCommonLines_default(cur, pfs_dic) #, merge=merge)
		if rlt: return rlt
		# automatic matching found nothing -- fall back to matching by idx
		by_col = 'idx'
	# find common lines by "by_col"

	if by_col == 'probe-mapping file':
		return getCommonLines_mapfn(cur, pfs_dic, mapfn)

	if by_col == 'user-specified columns':
		return getCommonLines_mapcols(cur, pfs_dic, map_db_pf_col)
	
	# get probes: one result row per by_col value with all probe idx concatenated
	if by_col in ('gene_symbol', 'unique_id', 'idx'):
		sql_mod = '''
			SELECT %(col)s, 
				GROUP_CONCAT(DISTINCT idx ORDER BY idx SEPARATOR ',')
			FROM %(db)s.probe
			WHERE platform_id=%(pf)s
			GROUP BY %(col)s
			ORDER BY %(col)s
			''' # % {'db':dbnm, 'pf':pf_id, 'col':by_col}
	else: # should be an external-db keyword name in the dbkw table
		sql_mod = '''
			SELECT k.kw,
				GROUP_CONCAT(DISTINCT p.idx ORDER BY p.idx SEPARATOR ',')
			FROM %(db)s.probe p, %(db)s.dbxref x, %(db)s.dbkw k
			WHERE p.platform_id=%(pf)s AND p.id=x.probe_id AND x.dbkw_id=k.id AND k.dbname="%(col)s"
			GROUP BY k.kw
			ORDER BY k.kw
			''' # % {'db':dbnm, 'pf':pf_id, 'col':by_col}

	i_xpf, i_pbs = 0, 1 # result-row indices: keyword, probe-idx string

	probe_dic = {} # like {dbnm:{pf_id:[(kw, idx_string), ...], ...}, ...}
	xpf_set = None # running intersection of keywords over all platforms
	for dbnm, pfids in pfs_dic.items():
		probe_dic[dbnm] = pbdic = {}
		# query from DB
		for pfid in pfids:
			sql = sql_mod % {'db':dbnm, 'pf':pfid, 'col':by_col}
			pbdic[pfid] = pbdictmp = inquireDB(sql, cursor=cur, fetch=True)
			# update xpf_set
			xpf_ids = map(lambda a:a[i_xpf], pbdictmp)
			if xpf_set is None: xpf_set = sets.Set(xpf_ids)
			else: xpf_set.intersection_update(xpf_ids)
	return matchLinesByKw(pfs_dic, probe_dic, xpf_set, i_xpf, i_pbs)

def ProbeType_no_use(s):
	'''
	Guess the flavor of an annotation header line.

	Returns 'affy' when both Affymetrix marker columns appear in s,
	otherwise the plain tab-separated default 'tsv'.
	'''
	affy_markers = ('Probe Set ID', 'GeneChip Array')
	if all(marker in s for marker in affy_markers):
		return 'affy'
	return 'tsv'

def getCommonLines_mapcols(cur, pfs_dic, map_db_pf_col={}, sep='\t'): #, merge='logmean'):
	'''
	Align platforms by a user-chosen column of each platform's annotation file.

	pfs_dic is a dict like {dbname:[pf_ids]}
	map_db_pf_col is a dict: {dbnm:{pfid:colname, ...}, ...}
	return (xpf_names, {dbname:{pf_id:[probe_idx_set, ...], ...}, ...})
	Each annotation row normally contributes one alignment value; an Affy
	cell may hold several values joined by '///', which are split and
	indexed individually.
	'''
	i_xpf, i_pbs = 0, 1

	probe_dic = {} # like {dbnm:{pf_id:[(colvalue, probe_idx_set), ...], ...}, ...}
	xpf_set = None # running intersection of alignment values over all platforms
	for dbnm, pfids in pfs_dic.items():
		probe_dic[dbnm] = pbdic = {}
		# query from DB
		for pfid in pfids: # pfid is a str
			# locate the annotation file registered for this platform
			fn = inquireDB('SELECT f.location, f.name FROM %s.filexref x, %s.fileinfo f WHERE x.tb_id=%s AND x.tbname="platform" AND x.file_id=f.id' % (dbnm, dbnm, pfid), cursor=cur, fetch=True)
			fn = os.path.join(fn[0][0], fn[0][1])
			pf_type, annotation_ln = ProbeFileType(fn)
			# Affy annotation cells may hold several keywords joined by '///'
			if pf_type == 'affy' and annotation_ln > 0: this_join_chs = '///'
			else: this_join_chs = join_chs
			fp = open(fn)
			for i in range(annotation_ln): fp.readline() # skip leading annotation lines
			head = fp.readline().strip()
			# quoted csv-style header ("a","b",...) vs plain delimited header
			if head[0] == head[-1] == '"' and '","' in head: 
				delim =  '"'
				sep_str = '","'
				head = head[1:-1].split(sep_str)
			else: 
				delim = None
				sep_str = sep
				head = map(rmDelim, head.split(sep_str))
			head = filter(lambda b:b, map(str.strip, head))
			
			i_col = head.index(map_db_pf_col[dbnm][pfid])
			if 'idx' in head: 
				# the file carries its own probe index: sort the rows by it
				i_idx = head.index('idx')
				def getTwoCols(a, sep=sep_str, delim=delim):
					if delim: b = a[1:-1].split(sep)
					else: b = map(rmDelim, a.split(sep))
					return (int(b[i_idx].strip()), b[i_col].strip())
				v_cols = map(getTwoCols, fp.xreadlines())
				v_cols.sort() # (idx, colvalue) sorted by idx
				v_cols = map(lambda a:a[1], v_cols) # only colvalues left
			else:
				def getOneCol(a, sep=sep_str, delim=delim):
					if delim: b = a[1:-1].split(sep)
					else: b = map(rmDelim, a.split(sep))
					return b[i_col].strip()
				v_cols = map(getOneCol, fp.xreadlines() )
			fp.close()
			pbids = inquireDB('SELECT idx, id FROM %s.probe WHERE platform_id=%s ORDER BY idx ASC' % (dbnm, pfid), cursor=cur, fetch=True)
			pbids = map(lambda a:a[0], pbids) # use idx now
			pbdictmp = {} # {colvalue:sets.Set(probe_idx), ... }
			def mapV_ID(a, pdic=pbdictmp, sep=this_join_chs):
				vstr, pid = a[0], a[1]
				if sep in vstr:
					# BUGFIX: was vstr.split(vstr) -- splitting a string by
					# itself always yields ['', '']; split by the joiner.
					vs = map(str.strip, vstr.split(sep))
					map(lambda a:pdic.setdefault(a, sets.Set()).add(pid), vs)
				else: pdic.setdefault(vstr, sets.Set()).add(pid)
			map(mapV_ID, zip(v_cols, pbids) )
			pbdictmp = pbdictmp.items() # [(colvalue, sets.Set(probe_idx)), ...]
			pbdictmp.sort()
			pbdic[pfid] = pbdictmp
			# update the running intersection of alignment values
			if xpf_set is None: xpf_set = sets.Set(v_cols)
			else: xpf_set.intersection_update(v_cols)
	return matchLinesByKw(pfs_dic, probe_dic, xpf_set, i_xpf, i_pbs)

def evalPbidStr(s):
	'''
	Turn a comma-separated id string (as produced by GROUP_CONCAT) into a list.

	GROUP_CONCAT output can be truncated mid-value by group_concat_max_len;
	since the ids are emitted in ascending order, a non-increasing final
	element betrays such truncation and is dropped.
	'''
	items = eval('[%s]' % s) # input comes from our own DB, not from users
	if len(items) > 1 and items[-2] >= items[-1]:
		del items[-1]
	return items

def matchLinesByKw(pfs_dic, probe_dic, xpf_set, i_xpf, i_pbs):
	'''
	Keep only the matched lines whose keyword appears on every platform.

	pfs_dic is a dict like {dbname:[pf_ids]}
	probe_dic: starts as {dbnm:{pf_id:[(kw, pb_ids_string_or_Set), ...], ...}, ...}
	  and is changed in place to {dbnm:{pf_id:[pbid_set, ...], ...}, ...}
	xpf_set is a Set of keywords shared by all platforms.
	i_xpf, i_pbs: indices of the keyword and probe-id fields in each tuple.
	Returns (xpf_names, probe_dic).
	'''
	# get common lines across platforms
	xpf_set = filter(lambda a:a, xpf_set) # remove invalid (empty) dbkw
	if len(pfs_dic)>1 or len(pfs_dic.values()[0])>1:
		xpf_dic = dict(zip(xpf_set, xpf_set)) # dict lookup is faster
		for dbnm, pfids in pfs_dic.items():
			pbdic = probe_dic[dbnm]
			for pfid in pfids: # null value (None, '', etc.) will be removed
				pbdic[pfid] = filter(lambda a:a[i_xpf] in xpf_dic, pbdic[pfid])
	# seperate xpf and pbs
	pbidlist = evalPbidStr
	xpf_names = None
	for dbnm, pfids in pfs_dic.items():
		pbdic = probe_dic[dbnm]
		for pfid in pfids: # map into list (or Set?)
			# row names come from the first platform seen (same keyword order
			# everywhere after the filtering above)
			if xpf_names is None: xpf_names = map(lambda a:str(a[i_xpf]), pbdic[pfid])
			# a[i_pbs] is either an "id,id,..." string (getCommonLines) or
			# already a Set (getCommonLines_mapcols)
			pbdic[pfid] = map(lambda a:((type(a[i_pbs]) is str) and (sets.Set(pbidlist(a[i_pbs])),) or (a[i_pbs],) )[0], pbdic[pfid])
	
	return (xpf_names, probe_dic) # mergeProbes(probe_dic, merge=merge)

# getPfPairs was copied from filldbs.py, or use from filldbs import getPfPairs
# getPfPairs was copied from filldbs.py; the live version is in tools.
def getPfPairs_now_in_tools(fn, sep='\t'): 
	'''
	Parse a platform-pair (probe-mapping) file.

	The header must contain either the generic columns
	(platform_a, unique_id_a, platform_b, unique_id_b) or the Affy ones
	(a/b array name, a/b probe set name); generic wins when both match.
	Returns a list of [platform_a, unique_id_a, platform_b, unique_id_b]
	rows, or None when the header matches neither layout.
	'''
	fp = open(fn)
	# BUGFIX: was map(string.lower, ...) but the "string" module is not
	# imported in this file; str.lower is the equivalent for byte strings.
	head = map(str.lower, fp.readline().split(sep))
	if not head: return None
	head_other = ['platform_a', 'unique_id_a', 'platform_b', 'unique_id_b']
	head_affy = ['a array name', 'a probe set name', 'b array name', 'b probe set name']
	if len(sets.Set(head_other).intersection(head)) == 4: head_use = head_other # head_other will override head_affy
	elif len(sets.Set(head_affy).intersection(head)) == 4: head_use = head_affy
	else:
		fp.close()
		return None
	idx = map(lambda a:head.index(a), head_use)
	# pick the four interesting columns out of every data row
	lines = []
	for row in fp.xreadlines():
		cells = row.split(sep)
		lines.append(map(lambda c:cells[c], idx))
	fp.close()
	return lines

def getMatchDics(fn, pfnms=None):
	'''
	Union-merge the probe pairs of platform-pair file "fn" into match groups.

	pfnms: optional Set of platform names to keep; collected from the file
	itself when not given.
	return {pf_nm:[probe_unid_set, ...], ...} -- only matches covering every
	platform are kept; lists are aligned across platforms by position.
	'''
	# note that platfoms from different databases but with same name should be same and will be treated as the same platform!
	match_dic = {} # {match_id:{pf_nm:probe_unid_Set, ...}, ...}
	pbxpf_dic = {} # {pf_nm:{probe_unid:match_id, ...}, ...}
	if not pfnms:
		pfnms = sets.Set() # platform names
		chk_pfs = True
	else: chk_pfs = False
	lines = getPfPairs(fn)
	cur_mid = 0 # match ids start at 1, so a 0/None lookup is always falsy
	for pfa, uida, pfb, uidb in lines:
		if chk_pfs:
			pfnms.add(pfa)
			pfnms.add(pfb)
		mid_a = pbxpf_dic.setdefault(pfa,{}).get(uida, None)
		mid_b = pbxpf_dic.setdefault(pfb,{}).get(uidb, None)
		if not mid_a and not mid_b: # a new match, add it
			cur_mid = cur_mid + 1
			pbxpf_dic[pfa][uida] = pbxpf_dic[pfb][uidb] = cur_mid
			match_dic[cur_mid] = {pfa:sets.Set([uida]), pfb:sets.Set([uidb])}
		elif mid_a and mid_b: # has both.
			if mid_a != mid_b: # make them the same match if they are not.
				# merge the higher-numbered match into the lower-numbered one
				midmin, midmax, pfmax, uidmax = mid_a < mid_b and (mid_a, mid_b, pfb, uidb) or (mid_b, mid_a, pfa, uida)
				match_min = match_dic[midmin]
				for pfnm, pbset in match_dic[midmax].items():
					if pfnm in match_min: match_min[pfnm].update(pbset)
					else: match_min[pfnm] = pbset
					# re-point every absorbed probe at the surviving match id
					for pb in pbset: pbxpf_dic[pfnm][pb] = midmin
				del match_dic[midmax]
		else : # only has one, then add the other
			mid_yes, mid_no, pf_yes, pf_no, uid_yes, uid_no = mid_a and (mid_a, mid_b, pfa, pfb, uida, uidb) or (mid_b, mid_a, pfb, pfa, uidb, uida)
			pbxpf_dic[pf_no][uid_no] = pbxpf_dic[pf_yes][uid_yes]
			match_dic[mid_yes].setdefault(pf_no, sets.Set(())).add(uid_no)
	# make probe_dic from match_dic
	del lines
	probe_unids = {} # {pf_nm:[probe_unid_set, ...], ...}
	for pf in pfnms: probe_unids[pf] = []
	match_dic = match_dic.items()
	match_dic.sort() # keep groups in first-seen (match id) order
	match_dic = map(lambda a:a[1], match_dic) # now match_dic has only values left: [{pf_nm:probe_unid_Set, ...}, ...]
	for pfpb in match_dic:
		if pfnms.difference(pfpb.keys()): # skip line doesn't have all platforms
			continue
		for pfnm, pbset in pfpb.items(): 
			if pfnm in pfnms: probe_unids[pfnm].append(pbset)
	return probe_unids
		
def getCommonLines_mapfn(cur, pfs_dic, mapfn): #, merge='logmean'):
	'''
	Align platforms using a user-supplied probe-mapping file.

	pfs_dic is a dict like {dbname:[pf_ids]}
	mapfn is the name of a probe-mapping file: 'dbnm_fileID' or an abspath
	return (xpf_names, {dbname:{pf_id:[[probe_ids], ...], ...}, ...}),
	None when nothing matches, or an error string for the user.
	'''
	fraw = mapfn # raw (user-visible) name used in error messages
	if mapfn == os.path.abspath(mapfn) and os.path.exists(mapfn): # abspath
		fn = mapfn
	else: # 'dbnm_fileID' 
		i = mapfn.rfind('_')
		if i < 1: return 'File "%s" doesn\'t exist' % mapfn
		fdb, fn = mapfn[:i], mapfn[i+1:] 
		floc, fn, fraw = inquireDB('SELECT location, name, raw_name FROM %s.fileinfo WHERE id=%s' % (fdb, fn), cursor=cur, fetch=True)[0]
		fn = os.path.join(floc, fn)
	if not os.path.exists(fn): return 'File "%s" doesn\'t exist' % fraw
	# get platform names
	pfnms_dic = {} # {dbnm:{pfid:pfname, ...}, ...}
	pfnms = sets.Set()
	for dbnm, pfids in pfs_dic.items(): # use CONCAT(id) to get str keys
		pfnms_dic[dbnm] =  nmids = dict(inquireDB('SELECT CONCAT(id), name FROM %s.platform WHERE id IN (%s)' % (dbnm, str(list(pfids))[1:-1]), cursor=cur, fetch=True))
		pfnms.update(nmids.values())
	probe_unids = getMatchDics(fn, pfnms) # {pf_nm:[probe_unid_set, ...], ...}
	if pfnms.difference(probe_unids.keys()): # has platform cannot be matched
		# BUGFIX: was list(...)[1:-1], which drops the FIRST and LAST platform
		# names; str(list(...))[1:-1] strips the brackets of the repr instead.
		return 'Some platforms (%s) cannot be aliged!' % str(list(pfnms.difference(probe_unids.keys())))[1:-1]
	xpf_names = map(lambda a:list(a)[0], probe_unids[list(pfnms)[0]])
	if not xpf_names: return None
	# get probe ids from unique_ids
	probe_ids = {} # {dbname:{pf_id:[[probe_ids], ...]}, ...}, ... }
	sql_mod = '''
		SELECT unique_id, GROUP_CONCAT(DISTINCT idx ORDER BY idx SEPARATOR ',')
		FROM %s.probe
		WHERE platform_id=%s
		GROUP BY unique_id
		ORDER BY unique_id
		''' 
	pbidlist = evalPbidStr
	for dbnm, pfids in pfs_dic.items():
		for pfid in pfids:
			# {unique_id:[idx, ...], ...}
			pbunidic = dict(map(lambda a:(a[0], pbidlist(a[1])), inquireDB(sql_mod % (dbnm, pfid), cursor=cur, fetch=True)))
			pfnm = pfnms_dic[dbnm][pfid]
			unids = probe_unids[pfnm]
			# map probe unique_ids to idx lists: one flat list per matched line
			def mapIds(unids):
				a = []
				map(lambda b:a.extend(pbunidic[b]), unids)
				return a
			probe_ids.setdefault(dbnm, {})[pfid] = map(mapIds, unids)
	return xpf_names, probe_ids


def	getCommonLines_default(cur, pfs_dic): #, merge='logmean'):
	'''
	Automatic alignment: first try the probe-mapping id (probe.mapf_id);
	when that yields nothing, fall back to the cross-platform id (xpf_id).

	pfs_dic is a dict like {dbname:[pf_ids]}
	return (xpf_names, {dbname:{pf_id:[probe_id_set, ...], ...}, ...}) or None
	'''

	rlt = getCommonLines_mapf(cur, pfs_dic)
	if rlt is not None: return rlt

	# get probes # SEPARATOR should used before ORDER BY
	MULTI_DB = len(pfs_dic) > 1
	if MULTI_DB:
		# across databases the keyword columns (k.*) are needed so the lines
		# can be matched by shared keywords later (LinesAcrossDBs)
		sql_mod = '''
			SELECT p.xpf_id, 
				GROUP_CONCAT(DISTINCT p.idx ORDER BY p.idx SEPARATOR ','), 
				GROUP_CONCAT(DISTINCT k.id ORDER BY k.id SEPARATOR ','), 
				GROUP_CONCAT(DISTINCT CONCAT_WS('::', k.dbname, k.kw) SEPARATOR '","') 
			FROM %s.probe p, %s.dbxref x, %s.dbkw k
			WHERE p.platform_id=%s && p.id=x.probe_id && x.dbkw_id=k.id
			GROUP BY p.xpf_id
			ORDER BY p.xpf_id
			''' # % (dbnm, dbnm, dbnm, pf_id)
	else:
		sql_mod = '''
			SELECT p.xpf_id, 
				GROUP_CONCAT(DISTINCT p.idx ORDER BY p.idx SEPARATOR ',')
			FROM %s.probe p
			WHERE p.platform_id=%s
			GROUP BY p.xpf_id
			ORDER BY p.xpf_id
			''' # % (dbnm, pf_id)
	return matchLinesByID(cur, pfs_dic, sql_mod)

def getCommonLines_mapf(cur, pfs_dic): #, merge='logmean'):
	'''
	Align platforms by the probe-mapping id (probe.mapf_id) stored in the DB.

	pfs_dic is a dict like {dbname:[pf_ids]}
	return (xpf_names, {dbname:{pf_id:[probe_id_set, ...], ...}, ...}) or
	None when no mapf_id-bearing lines are shared.
	'''

	# get probes # SEPARATOR should be given before ORDER BY
	MULTI_DB = len(pfs_dic) > 1
	if MULTI_DB:
		# BUGFIX: the NULL filter read "p.mapf_id IN NOT NULL", which is not
		# valid SQL; "IS NOT NULL" (as in the single-DB query below) is meant.
		sql_mod = '''
			SELECT p.mapf_id, 
				GROUP_CONCAT(DISTINCT p.idx ORDER BY p.idx SEPARATOR ','), 
				GROUP_CONCAT(DISTINCT k.id ORDER BY k.id SEPARATOR ','), 
				GROUP_CONCAT(DISTINCT CONCAT_WS('::', k.dbname, k.kw) SEPARATOR '","') 
			FROM %s.probe p, %s.dbxref x, %s.dbkw k
			WHERE p.platform_id=%s && p.mapf_id IS NOT NULL && p.id=x.probe_id && x.dbkw_id=k.id
			GROUP BY p.mapf_id
			ORDER BY p.mapf_id
			''' # % (dbnm, dbnm, dbnm, pf_id)
	else:
		sql_mod = '''
			SELECT p.mapf_id, 
				GROUP_CONCAT(DISTINCT p.idx ORDER BY p.idx SEPARATOR ',')
			FROM %s.probe p
			WHERE p.platform_id=%s && p.mapf_id IS NOT NULL
			GROUP BY p.mapf_id
			ORDER BY p.mapf_id
			''' # % (dbnm, pf_id)
	return matchLinesByID(cur, pfs_dic, sql_mod)

def matchLinesByID(cur, pfs_dic, sql_mod):
	'''
	Run sql_mod for every platform, keep only the lines whose grouping id
	(xpf_id or mapf_id) exists on all platforms of a database, and -- when
	several databases are involved -- match the surviving lines across
	databases by their shared keywords (LinesAcrossDBs).
	Returns (xpf_names, probe_dic) or None when nothing matches.
	'''
	MULTI_DB = len(pfs_dic) > 1
	i_xpf, i_pbs, i_k, i_kws = 0, 1, 2, 3 # column indices in the SQL result

	probe_dic = {} # like {dbnm:{pf_id:[pbid_set, ...], ...}, ...}
	kwset_dic = {} # like {dbnm:[kws_set, ...], ... }
	xpf_names = None
	pbidlist = evalPbidStr
	for dbnm, pfids in pfs_dic.items():
		probe_dic[dbnm] = pbdic = {}
		xpf_set = None # running intersection of grouping ids within this DB
		# query from DB
		for pfid in pfids:
			sql = sql_mod % (MULTI_DB and (dbnm, dbnm, dbnm, pfid) or (dbnm, pfid) )
			pbdic[pfid] = pbdictmp = inquireDB(sql, cursor=cur, fetch=True)
			# update xpf_set
			xpf_ids = map(lambda a:a[i_xpf], pbdictmp)
			if xpf_set is None: # first_pf:
				xpf_set = sets.Set(xpf_ids)
			else: xpf_set.intersection_update(xpf_ids)
			if not xpf_set: return None
		# get common lines within a DB
		if len(pfids) > 1: # need to be filtered by xpf_id
			xpf_set = filter(lambda a:a, xpf_set) # remove invalid dbkw
			xpf_dic = dict(zip(xpf_set, xpf_set)) # dict lookup is fast
			for pfid in pfids: # get common lines within db and map strings into list
				pbdic[pfid] = filter(lambda a:a[i_xpf] in xpf_dic, pbdic[pfid])
		if MULTI_DB: # make dbkw set for each line
			# NOTE(review): a[i_kws] is a '","'-joined keyword string WITHOUT
			# the outer quotes, but evalPbidStr evals '[%s]' as-is; the older
			# commented-out form wrapped it in '["%s"]'.  Also kwset_tmp is a
			# list here, so .update below would raise -- this multi-DB branch
			# looks unfinished; confirm before relying on it.
			kwset_dic[dbnm] = kwset_tmp = map(lambda a:sets.Set(pbidlist(a[i_kws])), pbdic[pfids[0]])
			for pfid in pfids[1:]:
				map(lambda a:kwset_tmp.update(pbidlist(a[i_kws])), pbdic[pfid])
		for pfid in pfids: # map into list (or Set?)
			if xpf_names is None and not MULTI_DB: xpf_names = map(lambda a:str(a[i_xpf]), pbdic[pfid])
			pbdic[pfid] = map(lambda a:sets.Set(pbidlist(a[i_pbs])), pbdic[pfid])

	
	# get common lines across DBs
	if MULTI_DB: xpf_names, probe_dic = LinesAcrossDBs(kwset_dic, probe_dic)

	if not xpf_names: return None
	return (xpf_names, probe_dic)


# Pull the DB connection settings (host, port, user, passwd, ...) into this
# module's namespace; get_dbstr() (from tools/db_vars) returns the
# assignments as source text.  NOTE(review): exec of generated code --
# assumed to come from a trusted local source; confirm.
exec get_dbstr()

def Main(req_type, req_id, user_params):
	'''
	Assemble the full R-side parameter dict from user_params, align probes
	across platforms when several are involved, then run the R analysis
	through pipeR and return its result (a (result, sql_update) tuple from
	the R pipe).  Returns None when the request cannot be analyzed, after
	filling the module-level error_msg list.
	'''
	# defaults for every parameter the R code expects; user_params overrides below
	param4R = {# XPF, MatchLines and xpf_names if XPF
		'host' : host,
		'port' : port,
		'user' : user,
		'password' : passwd,
		'grps' : [],
		'merge_method' : 'median',
		'bg_correct' : {},
		'norm_in_array' : {},
		'norm_in_pf' : {},
		'norm_x_pf' : 'none',
		'nbin' : 8,
		'use_ratio' : True, 'ratio_by' : 'array', 
		'use_rma' : False, 'save_data' : False, 'plot_chart' : False, 'plot_pdf' : False, 'sort_rlt' : False,
		'sum_rlt' : False, 'sum_rlt_by' : 'gene_symbol', 
		'screen_gene' : 'all',  'p_threshold' : 0.01, 'gene_num' : 100, # all, by_pvalue, by_number
		'clust_chs' : False, 'clust_grps' : False, 'plot_heatmap_chs' : False, 'plot_heatmap_grps' : False, 
		'probe_info' : 'in files', 
		'coa_analysis' : False, 'coa_num' : 5, 'bga_analysis' : False, 'bga_num' : 5,
		'plot_genome' : False, 'plot_genome_by' : 'nucleotide', 'weak_perc' : 0.025, 
		'n_smooth' : 3, 'fun_smooth' : 'median', 'gn_probefile' : '', 
		'tn_analysis' : False, 'gn_p_label' : True, 'gn_p_threshold' : 0.01,  
		'tn_threshold_M' : 1, 'tn_threshold_A' : 0.8,
		'bac_cgh' : False, 'cgh_m1':'0.33', 'cgh_m2':'0.67', 'cgh_m3':'1.5', 
		'nucleotide_num' : 10000, 'probe_num' : 1000, 'max_section' : 100,
		'match_probe' : True,
		'screen_for_cluster' : True, 'screen_for_heatmap' : True, 'screen_for_coa' : True, 
		'screen_for_bga' : True, 'screen_for_genome' : False,
		'analysis_method' : 'Mixed_ANOVA',
		'contrasts' : [], 'contrasts_simple' : [],
		'anova_platform' : True, 'anova_array' : True, 'anova_dye' : True, 'anova_individual' : True, 'anova_sample' : True,
		'anova_factor' : 'factors_in_db', # or factors_by_user, groups_only, user_model
		'uf_name':[], 'uf_type':[], 'uf_value':[], 'user_model':'', 'user_model_disp':'', 
		'uf_in_model':[], 'uf_nmdic':{}, 'is_mixed_model':False, 'has_group':True, 'has_intercept':True,
		'debug_mode':False, 'row_num':10000, 'max_chart':10, 
		'ref_name' : '',
		'result_dir' : '.', 
		'wd' : os.path.join(os.path.split(os.path.abspath(__file__))[0], 'R_code'),
		'work_dir' : os.path.dirname(os.path.abspath(__file__)),
		'nproc' : 1,
		'req_name' : '',
		} #'mapf':'', 'map_db_pf':[], 'mapcols':[]

	# accept simple params from user_params (same key on both sides)
	simple_params = ['host', 'port', 'user', 'password', 'merge_method', 'norm_x_pf', 'nbin', 
			'use_ratio', 'ratio_by', 'use_rma', 'save_data', 'plot_chart', 'plot_pdf', 'sort_rlt', 'sum_rlt', 'sum_rlt_by',
			'screen_gene', 'p_threshold', 'gene_num', 'clust_chs', 'clust_grps', 'plot_heatmap_chs', 'plot_heatmap_grps', 
			'coa_analysis', 'coa_num', 'bga_analysis', 'bga_num',
			'probe_info', 'plot_genome', 'plot_genome_by', 'nucleotide_num', 'probe_num',
			'screen_for_cluster', 'screen_for_heatmap', 'screen_for_coa', 'screen_for_bga', 'screen_for_genome', 
			'max_section', 'weak_perc', 'tn_analysis', 'gn_p_label', 'gn_p_threshold', 'gn_probefile', 
			'n_smooth', 'fun_smooth', 
			'tn_threshold_M', 'tn_threshold_A', 'bac_cgh', 'cgh_m1', 'cgh_m2', 'cgh_m3',
			'match_probe', 'analysis_method', 'contrasts', 'contrasts_simple',
			'anova_platform', 'anova_array', 'anova_dye', 'anova_individual', 'anova_sample', 
			'anova_factor', 'uf_type', 'uf_nmdic', 'uf_in_model', 'user_model_disp', 
			'is_mixed_model', 'has_group', 'has_intercept',
			'debug_mode', 'row_num', 'max_chart',
			'result_dir', 'ref_name', 'req_name']
	for k in simple_params:
		if k in user_params: param4R[k] = user_params[k]

	# params whose user_params key differs from the R-side name
	dif_params = {'uf_name_parsed':'uf_name', 'uf_value_parsed':'uf_value', 'user_model_parsed':'user_model'}#, 'by_col':'match_method'}
	for k,v in dif_params.items():
		if k in user_params: param4R[v] = user_params[k]

	# nproc
	try:
		import socket
		param4R['HOSTNAME'] = socket.gethostname() #os.uname()[1] # R Sys.getenv may get '' for HOSTNAME in the program is run by a daemon launched in /etc/init.d/. In such a case, Python os.uname will offer correct name.
		if PARALLEL_IN_NODE_ONLY: # a varialbe from tools.py from DAEMON_HOST
			nproc = CPUInfo()['n_cores'] # this can be better/faster for those are not too heavy computations !!!
		else:
			nproc = CPUInfo()['nodes']
		param4R['nproc'] = nproc
	except: pass # keep the default nproc of 1 when host introspection fails

	param4R['grps'] = grps = user_params.get('groups', []) # a list of group, a group is a list of dbnm_pfID_arrayID_chN
	if not grps: return
	
	param4R['match_method'] = by_col = user_params.get('by_col', 'automatic') #None)
	# NOTE(review): merge_method is read here but not used below; confirm.
	merge_method = user_params.get('merge_method', 'log mean')

	# get other options
	db_pf = user_params.get('db_pf', []) # [dbnm_pfid, ...], db_pf is the dbname_pfid for bg_correct, norm_in_array and norm_in_pf
	bg_correct, norm_in_array, norm_in_pf = param4R['bg_correct'], param4R['norm_in_array'], param4R['norm_in_pf']
	if db_pf:
		# split each 'dbnm_pfid' at its LAST '_' (dbnm may contain '_') and
		# file the per-platform method under {dbnm:{pfid:method, ...}, ...}
		map(lambda a:bg_correct.setdefault(a[0][:a[0].rfind('_')], {}).setdefault(a[0][a[0].rfind('_')+1:], a[1]) , zip(db_pf, user_params['bg_correct']) ) # {dbnm:{pfid:method, ... }, ... }
		map(lambda a:norm_in_array.setdefault(a[0][:a[0].rfind('_')], {}).setdefault(a[0][a[0].rfind('_')+1:], a[1]) , zip(db_pf, user_params['norm_in_array']) ) # {dbnm:{pfid:method, ... }, ... }
		map(lambda a:norm_in_pf.setdefault(a[0][:a[0].rfind('_')], {}).setdefault(a[0][a[0].rfind('_')+1:], a[1]) , zip(db_pf, user_params['norm_in_pf']) ) # {dbnm:{pfid:method, ... }, ... }

	mapfn = user_params.get('mapf', None)
	map_db_pf = user_params.get('map_db_pf', []) # [dbnm_pfid, ...] for the user-specified alignment columns
	map_db_pf_col = {} # {dbnm:{pfid:col, ...}, ...}
	if map_db_pf:
		map(lambda a:map_db_pf_col.setdefault(a[0][:a[0].rfind('_')], {}).setdefault(a[0][a[0].rfind('_')+1:], a[1]), zip(map_db_pf, user_params['mapcols']) ) 

	pfids = {} # {dbnm:[pf_id, ...], ...}
	for grp in grps:
		for j in range(len(grp)):
			db_ary = grp[j]
			# db_ary is 'dbnm_pfid_arrayid_chN'; dbnm itself may contain '_',
			# so split at the three RIGHT-most underscores
			i3 = db_ary.rfind('_')
			i2 = db_ary.rfind('_', 0, i3)
			i1 = db_ary.rfind('_', 0, i2)
			dbnm, pf_id, array_id, chN = db_ary[:i1], db_ary[i1+1:i2], db_ary[i2+1:i3], db_ary[i3+1:]
			try: 
				if int(chN) <= 0: chN = '1' # channel numbers are 1-based
			except: pass
			pfids.setdefault(dbnm, sets.Set()).add(pf_id)
			grp[j] = (dbnm, pf_id, array_id, chN) # now grp is a list of tuple: (dbnm, pf_id, array_id, chN)
	if not pfids: return
	for dbnm in pfids.keys(): pfids[dbnm] = list(pfids[dbnm])
	cur = getCursor()

	# cross-platform (XPF) when more than one database or platform is involved
	XPF = len(pfids) > 1 or len(pfids.values()[0])>1
	if XPF and param4R['match_probe']: 
		xM = getCommonLines(cur, pfids, by_col=by_col, mapfn=mapfn, map_db_pf_col=map_db_pf_col)
		if not xM:
			error_msg.append('<p>No matched lines found across multi-platform, cannot do analysis!<p>')
			return
		if type(xM) == str: # getCommonLines returned an error message
			error_msg.append(xM)
			return
		xpf_names, MatchLines = xM
		if not MatchLines or not MatchLines.values()[0] or not MatchLines.values()[0].values()[0]: 
			error_msg.append('<p>No matched lines found across multi-platform, cannot do analysis!<p>')
			return
	# read and merge intensities
	cur.close()
	
	param4R['XPF'] = XPF
	if XPF and param4R['match_probe']: 
		param4R['MatchLines'] = setToStrList(MatchLines) # Convert to str since R will str too.
		param4R['xpf_names'] = xpf_names
		del MatchLines, xpf_names # release memory

	# write param4R as an R source file for the R process to read
	import tempfile
	ftmp = tempfile.NamedTemporaryFile('w')
	fntmp = ftmp.name
	from py2R import getStr
	ftmp.write('user_params <- ')
	ftmp.write(getStr(param4R))
	ftmp.flush() # keep the file open (NamedTemporaryFile deletes on close)
	if __DEBUG__:
		import shutil
		try: shutil.copy(fntmp, '/home/xxia/temp/user_params.R') #image.Rdata')
		except: pass

	del param4R # release memory
	CMDS = ['source("%s")' % os.path.join(os.path.split(os.path.abspath(__file__))[0], 'analyzeDBsR').replace('\\', '\\\\') ]
	CMDS.append('source("%s")' % fntmp.replace('\\', '\\\\') )
	CMDS.append('IN_DEBUG_MODE <- %s' % (__DEBUG__ and 'TRUE' or 'FALSE') )
	CMDS.append('rlt <- analyzeMPMDB(user_params, con=NULL)' )

	rlt_sql = pipeR(CMDS=CMDS, req_id=req_id)

	ftmp.close() # temp file is removed here

	return rlt_sql

def saveParam(param, fn):
	"""Have a separate python/rpy process save *param* as an R data file *fn*.

	The parameters are pickled to a temporary file, then a fresh ``python``
	child is driven through a pipe to unpickle them, push them into R and
	``save()`` them — presumably so rpy's memory stays in the short-lived
	child rather than this process (see the "save memory" note at the
	commented-out call site) — TODO confirm.

	Fix: ``os.tempnam()`` is vulnerable to a symlink race (and raises a
	RuntimeWarning); ``tempfile.mkstemp`` creates the file atomically.
	The temp file is now always removed, even if the child fails.
	"""
	import cPickle
	import tempfile
	fd, fntmp = tempfile.mkstemp()
	try:
		ftmp = os.fdopen(fd, 'w')
		ftmp.write(cPickle.dumps(param))
		ftmp.close()
		pin, pout = os.popen4('python')
		CMDS = ['import cPickle',
				'from rpy import r',
				'param = cPickle.loads(open("%s").read())' % fntmp,
				'r.assign("user_params", param)',
				'''r('save(user_params, file="%s")')''' % fn,
				'''if (%d): r("save.image(file='/home/xxia/temp/image.Rdata')")''' % (__DEBUG__ and 1 or 0)
				]
		for cmd in CMDS: os.write(pin.fileno(), cmd + '\n')
		pin.close()
		pout.read() # block until the child has finished
		pout.close()
	finally:
		os.unlink(fntmp)

def setToList(ML):
	"""Replace every probe-match set in *ML* with a sorted list, in place.

	*ML* maps platform keys to ``{name: [set, set, ...]}``; each set is
	turned into a sorted list so the structure can be handed to R.
	Returns the (mutated) *ML*.
	"""
	for per_pf in ML.values():
		for name in per_pf:
			per_pf[name] = [sorted(group) for group in per_pf[name]]
	return ML

def setToIntList(ML):
	"""Replace every probe-match set in *ML* with a sorted list of ints,
	in place, and return *ML*.

	Values are converted to plain ``int`` because Rpy cannot handle
	Python long integers (see the note at the commented-out call site).

	Fix: the original used ``a = map(int, a); a.sort()``, which breaks on
	Python 3 where ``map`` returns an iterator without ``.sort()``;
	``sorted()`` accepts any iterable and behaves identically on 2 and 3.
	"""
	for per_pf in ML.values():
		for name, groups in per_pf.items():
			per_pf[name] = [sorted(int(item) for item in group)
							for group in groups]
	return ML

def setToStrList(ML):
	"""Replace every probe-match set in *ML* with a sorted list of strings,
	in place, and return *ML*.

	Each group is sorted first (in its native order), then stringified;
	a trailing "L" (repr of a Python long integer) is stripped because R
	will treat the values as strings anyway.
	"""
	for per_pf in ML.values():
		for name, groups in per_pf.items():
			converted = []
			for group in groups:
				ordered = list(group)
				ordered.sort()
				converted.append([s if s[-1] != 'L' else s[:-1]
								  for s in map(str, ordered)])
			per_pf[name] = converted
	return ML

def runR_rpy(param4R): # not used now
	"""Run the analysis in-process through rpy (superseded by the
	source-a-temp-file / pipeR path in the caller).

	Pushes *param4R* into R as ``user_params``, sources the analyzeDBsR
	script next to this module, sets ``IN_DEBUG_MODE`` and evaluates
	``analyzeMPMDB``.  Result is left in the R variable ``rlt``; nothing
	is returned to Python.
	"""
	# Mute rpy's console chatter.
	from rpy import r, set_rpy_output
	set_rpy_output(lambda s: None)

	r.assign('user_params', param4R)

	if __DEBUG__:
		r("save.image(file='/home/xxia/temp/image.Rdata')")

	script_path = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'analyzeDBsR')
	r('source("%s")' % script_path)
	r.assign('IN_DEBUG_MODE', __DEBUG__)
	r('rlt <- analyzeMPMDB(user_params, con=NULL)')

def runR_pipe(param4R, req_id): # not used now
	"""Hand the analysis to R through pipeR (superseded by the
	source-a-temp-file path in the caller).

	Uses an in-process rpy only to serialize *param4R* into a temporary
	``.Rdata`` file, then drives a separate R process via pipeR to load it
	and run ``analyzeMPMDB``.  Returns whatever ``pipeR`` returns
	(a ``(rlt, sql_update)`` tuple per the caller's comments).

	Fix: ``os.tempnam()`` is vulnerable to a symlink race (and raises a
	RuntimeWarning); ``tempfile.mkstemp`` creates the file atomically.
	The temp file is now removed even if pipeR raises, and the pointless
	read-only re-open of it has been dropped.
	"""
	import tempfile
	# Silence stdout while rpy is active (rpy prints to the console).
	stdout = sys.stdout
	stderr = sys.stderr
	sys.stdout = dummystdout()
	def myout(s): pass
	from rpy import r, set_rpy_output
	set_rpy_output(myout)

	r.assign('user_params', param4R)

	if (__DEBUG__): r("save.image(file='/home/xxia/temp/image.Rdata')")

	fd, fntmp = tempfile.mkstemp()
	os.close(fd) # R writes the file; we only need the (existing) path
	sys.stdout = stdout
	sys.stderr = stderr
	try:
		r('save(user_params, file="%s")' % fntmp)

		CMDS = ['source("%s")' % os.path.join(os.path.split(os.path.abspath(__file__))[0], 'analyzeDBsR') ]
		CMDS.append('load("%s")' % fntmp)
		CMDS.append('IN_DEBUG_MODE <- %s' % (__DEBUG__ and 'TRUE' or 'FALSE') )
		CMDS.append('rlt <- analyzeMPMDB(user_params, con=NULL)' )

		rlt_sql = pipeR(CMDS=CMDS, req_id=req_id)
	finally:
		os.unlink(fntmp)

	return rlt_sql

class dummystdout:
	"""File-like sink that silently discards everything written to it.

	Installed as ``sys.stdout`` to mute console output (e.g. while rpy is
	active); only the ``write``/``flush`` surface callers need is provided.
	"""

	def write(self, *_args):
		# Swallow all output.
		pass

	def flush(self, *_args):
		# Nothing buffered, nothing to do.
		pass


if __name__ == '__main__':
	try:
		runPipe()
	except:
		# Any failure is reported back through stdout as a pickled
		# ('Nothing done', html) tuple, combining the accumulated
		# error_msg entries with the formatted traceback.
		buf = cStringIO.StringIO()
		traceback.print_exc(None, buf)
		detail = '\n<br>'.join(error_msg) + '\n<p>' + buf.getvalue()
		payload = cPickle.dumps( ('Nothing done', htmlStr(detail)) )
		os.write(sys.stdout.fileno(), payload)


