-rwxr-xr-x  client/scirec.py                   16
-rw-r--r--  docs/scire2.sql                     2
-rwxr-xr-x  scire/.lib/DB_functions.php        52
-rw-r--r--  scire/add_job.php                   4
-rw-r--r--  server/modules/GACL_functions.py   64
-rw-r--r--  server/modules/__init__.py          0
-rw-r--r--  server/modules/client.py            2
-rw-r--r--  server/modules/job.py             295
8 files changed, 286 insertions, 149 deletions
diff --git a/client/scirec.py b/client/scirec.py
index 437f3f4..78c6799 100755
--- a/client/scirec.py
+++ b/client/scirec.py
@@ -52,7 +52,7 @@ def run_jobs(client,jobs):
     # 2 = Sanity check failed
 
     for jobid in jobs:
-        job=client.get_job(jobid,True)
+        job=client.get_job(jobid)
 
         if sim or debug or verbose:
             print "Job %s:" % job['jobid']
@@ -83,6 +83,11 @@ def run_jobs(client,jobs):
             scriptfd = open(scriptfile,'w')
             scriptfd.writelines(job['script']['script_data'])
             scriptfd.close()
+
+            # As we are sure that the job is now on the client,
+            # let's mark it as downloaded
+            client.mark_job_as('Downloaded', job['jobid'])
+
             os.chmod(scriptfile,0755) # Is this dangerous?
@@ -97,12 +102,15 @@ def run_jobs(client,jobs):
         #print 'Command output to return: %s' % output
         if rcode == 'ScireDepErr':
             success = 'Failed'
+            client.mark_job_as('Failed', jobid)
             client.job_return(job['jobid'],success,output)
         elif rcode == 'Aborted':
             success = 'Aborted'
+            client.mark_job_as('Cancelled', jobid)
             client.job_return(job['jobid'],success)
         elif int(rcode) == int(job['script']['success_code']):
             success = 'Succeeded'
+            client.mark_job_as('Finished', jobid)
             job['script']['return_output'] = 1; #FIXME don't hardcode hacks like this. fix the DB/UI
             if job['script']['return_output'] and (job['script']['return_output']== 1):
                 client.job_return(job['jobid'],success,output)
@@ -128,8 +136,9 @@ def run_job(client,job,command):
     if client.job_cancelled(jobid):
         return 'Aborted','Job aborted'
-
-    deps = job['job_dependency']
+    # RLAZO: Comment for testing
+#    deps = job['job_dependency']
+    deps = 'None'
     if deps != 'None':
         deplist = ()
         deplist = job['job_dependency'].split(',')
@@ -140,6 +149,7 @@ def run_job(client,job,command):
                 rmsg = "Dependency for jobid %s: %s. Status: %s" % (jobid,jobdep,d_status)
                 return 'ScireDepErr',rmsg
 
+    client.mark_job_as('Running', jobid)
     status,output = commands.getstatusoutput(''.join([command,' 2>&1']))
 
     if debug:
diff --git a/docs/scire2.sql b/docs/scire2.sql
index 71a3437..7bf55f5 100644
--- a/docs/scire2.sql
+++ b/docs/scire2.sql
@@ -14,7 +14,7 @@
 INSERT INTO jobs_status (statusid, statusname) values (2, 'Downloaded');
 INSERT INTO jobs_status (statusid, statusname) values (3, 'Running');
 INSERT INTO jobs_status (statusid, statusname) values (4, 'Failed');
 INSERT INTO jobs_status (statusid, statusname) values (5, 'Finished');
-
+INSERT INTO jobs_status (statusid, statusname) values (6, 'Cancelled');
 
 INSERT INTO `permissions` VALUES (0,'View Users','Able to list the users',1,'2007-03-26 01:29:28','Default'),(23,'Test Permission 1','this is only a test.',1,'2007-03-26 02:20:40','Default'),(24,'Add ClientGroup','In the clients section, lets you add a group of clients.',2,'2007-04-01 17:03:11','Default'),(25,'Edit ClientGroup','Allows editing of the name and parent of a clientgroup.',2,'2007-04-01 17:07:32','Default'),(26,'Delete ClientGroup','deleting of a clientgroup. clients within move to parent.',2,'2007-04-01 17:08:39','Default'),(27,'Assign Permission','allows assigning permissions of users to clients. This is a very important permission!',2,'2007-04-01 17:52:25','Default'),(28,'View Permissions','see the list of active permissions.',2,'2007-04-01 17:53:17','Default'),(29,'Edit Client','edit client information.',2,'2007-04-01 18:10:02','Default'),(30,'Delete Client','remove a client.',2,'2007-04-01 18:10:21','Default'),(31,'Change User Pass/Name','If set, the ability to change a user\'s password will be enabled.',1,'2007-04-01 21:16:08','Default'),(32,'Add UserGroup','allows creation of usergroups.',1,'2007-04-01 23:18:59','Default');
diff --git a/scire/.lib/DB_functions.php b/scire/.lib/DB_functions.php
index 66b7079..a84cb33 100755
--- a/scire/.lib/DB_functions.php
+++ b/scire/.lib/DB_functions.php
@@ -353,14 +353,37 @@ function scire_add_job($script, $priority, $creator, $permission, $description,
     if (!$result) {
         return $db->error;
     }
-    $result = $db->insert('job_conditions', array('jobid' => $jobid, 'job_dependency' => $job_dependency, 'run_schedule' => $run_schedule, 'validity_period' => $validity_period));
+
+    if ($run_schedule != "") {
+        $cron = new CronParser($run_schedule);
+        $nextRun = $cron->calculateNextRun();
+        var_dump($nextRun);
+        $nextRun = mktime( $nextRun[1], $nextRun[0], 0, $nextRun[3], $nextRun[2], $nextRun[4] );
+        $expTime = $nextRun + ( $validity_period * 60);
+        $nextRun = strftime( '%Y-%m-%d %T', $nextRun );
+        $expTime = strftime( '%Y-%m-%d %T', $expTime );
+    } else {
+        $nextRun = "";
+        $expTime = "";
+    }
+    # Add conditions
+    $result = $db->insert('job_conditions', array('jobid' => $jobid, 'job_dependency' => $job_dependency, 'run_schedule' => $run_schedule, 'deploy_time' => $nextRun, 'expiration_time' => $expTime, 'validity_period' => $validity_period));
     if (!$result) {
         return $db->error;
     }
-
+
+    # Add history entry only if the job is assigned to a specific
+    # client. Managing groups require a different approach
     #Now add the clients.
+    $status = get_statusid('Pending');
     if ($clients) {
         foreach ($clients as $client) {
+            $result = $db->insert('job_history', array('jobid' => $jobid, 'clientid' => $client,
+                                                        'statusid' => $status,
+                                                        'eventmsg' => 'Job created'));
+            if (!$result) {
+                return $db->error;
+            }
             $result = $db->insert('jobs_clients', array('jobid' => $jobid, 'clientid' => $client));
             if (!$result) {
                 return $db->error;
@@ -450,6 +473,20 @@ function scire_edit_job($jobid, $fields) {
     }
 }
 
+function get_statusid($statusname) {
+    global $db;
+    $name = htmlentities($statusname);
+    $result = $db->select('SELECT statusid FROM jobs_status WHERE statusname = \'' . $name . '\'');
+
+    if ($result) {
+        var_dump( $result[0]['statusid'] );
+
+        return $result[0]['statusid'];
+    }
+    else {
+        return $db->error;
+    }
+}
 
 function scire_add_script($name, $desc, $location, $script_data, $log_location, $success_code, $run_as, $priority, $permission, $pp_location, $pp_script_data, $script_tags) {
     global $db;
@@ -473,7 +510,16 @@ function scire_add_script($name, $desc, $location, $script_data, $log_location,
 
     return 0; #Success
 }
-
+function get_dyn_tag_value($scriptid,$tag) {
+    global $db;
+    $scriptid = (int) $scriptid;
+    $result = $db->select('tag_value', 'dyn_tags', "`scriptid` = $scriptid AND `tag` = '$tag'");
+    if ($result && count($result) > 0) {
+        return $result[0]['tag_value'];
+    } else {
+        return false;
+    }
+}
diff --git a/scire/add_job.php b/scire/add_job.php
index 37499ca..140f83d 100644
--- a/scire/add_job.php
+++ b/scire/add_job.php
@@ -39,7 +39,8 @@ if ($_POST['ADD']) {
     $scheduleComplete = $_POST["minute1"] and $_POST["hour1"] and $_POST["day1"] and $_POST["month1"] and $_POST["weekday1"];
     if ($scheduleComplete) {
-        $str = implode(" ", array($_POST["minute1"], $_POST["hour1"],$_POST["day1"], $_POST["month1"], $_POST["weekday1"]));
+        $str = implode(" ", array($_POST["minute1"], $_POST["hour1"],
+                                  $_POST["day1"], $_POST["month1"], $_POST["weekday1"]));
     } else {
         $str = "";
     }
@@ -47,7 +48,6 @@ if ($_POST['ADD']) {
     $dependency = 1;
 
     try {
-        #$cron = new CronParser($str);
         $result = scire_add_job($_POST['script'], $priority, $_SESSION['userid'], $permission, $description, $pending, $_POST['clients'], $_POST['clientgroups'], $dependency, $str, $_POST['validity_period']);
 
         if (!$result) {
diff --git a/server/modules/GACL_functions.py b/server/modules/GACL_functions.py
new file mode 100644
index 0000000..147fa28
--- /dev/null
+++ b/server/modules/GACL_functions.py
@@ -0,0 +1,64 @@
+from adodb import *
+from modules.client import *
+
+def get_client_groups(db,id)
+    #options.
+    option = 'NO_RECURSE' # * If $option == 'RECURSE' it will get all ancestor groups. defaults to only get direct parents.
+    # * @return array Array of Group ID #'s, or FALSE if Failed
+    print "get_object_groups(): Object ID: %s id Option: %s\n" % (id, option)
+    object_type = 'axo'
+    group_table = 'gacl_axo_groups'
+    map_table = 'gacl_groups_axo_map'
+
+    if not id:
+        print "get_object_groups(): Object ID: (%s ) is empty, this is required" % str(id)
+        return false
+
+
+    if option == 'RECURSE':
+        query = """
+        SELECT DISTINCT g.id AS group_id
+        FROM %s gm
+        LEFT JOIN %s g1 ON g1.id=gm.group_id
+        LEFT JOIN %s g ON g.lft<=g1.lft AND g.rgt>=g1.rgt
+        """ % (map_table,group_table,group_table)
+    else:
+        query = """
+        SELECT gm.group_id
+        FROM %s gm
+        """ % map_table
+
+    query += " WHERE gm.axo_id=%s " % str(id)
+    print query
+    cursor = db.conn.Execute(query)
+    #fixme error check the return
+
+    while (not cursor.EOF):
+        row = cursor.FetchRow()
+        retarr.append(row[0])
+
+    return retarr
+
+
+
+# # Add the client into the gacl AXO table
+# db.conn.Execute('LOCK TABLES `gacl_axo_seq` WRITE');
+# # we add one to get the next valid free id
+# id = db.conn.GetRow('SELECT id FROM `gacl_axo_seq`')[0] + 1;
+# result = db.conn.Execute('UPDATE `gacl_axo_seq` SET id=%s', id);
+# db.conn.Execute('UNLOCK TABLES');
+
+# result2 = db.conn.Execute('INSERT INTO `gacl_axo` (id,section_value,value,order_value,name,hidden) VALUES (%s,%s,%s,%s,%s,%s)', (id,'clients',client_info['hostname'],1,client_info['hostname'],0) );
+
+def get_group_clients(db, group):
+    """This function gets the members of groups. Returns an array
+    containing those clients, empty otherwise"""
+
+    # Missing recursive groups
+    members = []
+    query = "SELECT axo_id FROM gacl_groups_axo_map WHERE group_id = %s" % group
+    print query
+    cursor = db.conn.Execute(query)
+    while (not cursor.EOF):
+        members.append(cursor.FetchRow()[0])
+    return members
diff --git a/server/modules/__init__.py b/server/modules/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/server/modules/__init__.py
diff --git a/server/modules/client.py b/server/modules/client.py
index 849cbd4..8023e89 100644
--- a/server/modules/client.py
+++ b/server/modules/client.py
@@ -42,7 +42,7 @@ def add_client(client_digest,certbuf,client_info):
     try:
         # Add the client into the gacl AXO table
         db.conn.Execute('LOCK TABLES `gacl_axo_seq` WRITE');
-        # wee add one to get the next valid free id
+        # we add one to get the next valid free id
         id = db.conn.GetRow('SELECT id FROM `gacl_axo_seq`')[0] + 1;
         result = db.conn.Execute('UPDATE `gacl_axo_seq` SET id=%s', id);
         db.conn.Execute('UNLOCK TABLES');
diff --git a/server/modules/job.py b/server/modules/job.py
index 3a2f696..ec0986c 100644
--- a/server/modules/job.py
+++ b/server/modules/job.py
@@ -4,38 +4,40 @@
 sys.path.append("../postprocess")
 from ScireDB import *
 from adodb import *
 from client import *
+import GACL_functions
 import md5
 import datetime
+import pdb
 
 POSTPROCESS_DIR = "postprocess"
 debug = True
 
 def register():
-    return ['recover_spool', 'gen_summary', 'get_jobs', 'spool_jobs', 'unspool_job', 'get_job', 'job_return', 'get_jstatus', 'job_cancelled']
+    return ['recover_spool', 'gen_summary', 'get_jobs', 'spool_jobs', 'unspool_job', 'get_job', 'job_return', 'get_jstatus', 'job_cancelled', 'mark_job_as']
 
 def job_cancelled(client_digest,jobid):
     client_id = get_clientid(client_digest)
     db = ScireDB()
-    job = get_job(client_digest,jobid)
-#    if job['origjob'] != 'None':
-#        jobid=job['origjob']
-    # How to deal with a cancelled recurring job?
+    # rlazo: I think that using the last stored value on the
+    # job_history table will always be accurated. If the user
+    # wants to rerun the job again, the status will change to
+    # PENDING
+
     cursor = db.conn.Execute('''
-SELECT status FROM job_history WHERE jobid=%s AND clientid=%s
-''', (jobid,client_id))
-    while not cursor.EOF:
-        hist_dict = cursor.GetRowAssoc(0) # 0 is lower, 1 is upper-case
-        if (hist_dict['status'] == 'Cmd:Abort'):
-            db.Close()
-            return True
-        cursor.MoveNext()
-    db.Close()
-    return False
+    SELECT jobs_status.statusname
+    FROM job_history LEFT JOIN jobs_status on (job_history.jobid)
+    WHERE job_history.jobid=%s AND job_history.clientid=%s
+    ''', (jobid,client_id))
+    status = cursor.FetchRow()
+    if status:
+        return status[0] == "Cancelled"
+    else:
+        return False
 
 def recover_spool(self,jobs):
     # Here we will implement the logic to read in
@@ -80,36 +82,37 @@ def gen_job_digest(client_digest,jobid):
     return job_digest
 
 def get_jobs(client_digest,summary,jobs):
-
+    """Returns all the job from the database for the client identified by
+    client_digest that fulfill the following conditions:
+    - It is time to execute it (deploy_time and expiration_time)
+    - The latest entry on job_history for the job is marked as Pending"""
     client_id = get_clientid(client_digest)
+    # pdb.set_trace()
    if not client_id:
         print 'Could not get client ID from database for %s.' % client_digest
         return False
 
     db = ScireDB()
-    try:
-        row = db.conn.GetRow('SELECT statusid FROM jobs_status WHERE statusname = %s', ("Pending"))
-    except:
-        print 'Error ' + str(sys.exc_info()[1]); # retrieve the error message returned by database
-        return False
-
-    if row:
-        pending_id = row[0]
-    else:
+    pending_id = get_status_id('Pending')
+    if not pending_id:
         print 'Error... could not load client_status'
 
+    # rlazo: Do the magic. This is all the need for a multijob to work. I hope
+    expand_jobs(client_id)
+
     try:
         cursor = db.conn.Execute('''
 SELECT jobs.jobid, jobs.priority, job_conditions.job_dependency, job_conditions.deploy_time, job_conditions.expiration_time, job_history.statusid
-FROM jobs,jobs_clients LEFT JOIN job_conditions using (jobid) LEFT JOIN job_history using (jobid)
-WHERE (jobs_clients.clientid=%s)
+FROM jobs NATURAL JOIN jobs_clients NATURAL JOIN job_conditions NATURAL JOIN job_history
+WHERE jobs_clients.clientid = %s
 AND jobs.jobid = jobs_clients.jobid
 AND (job_conditions.deploy_time < now())
 AND (job_conditions.expiration_time > now())
+AND job_history.statusid = '%s'
 ORDER BY jobs.priority,jobs.created
-''', str(client_id))
+''', (str(client_id), pending_id))
     except:
         print 'Error ' + str(sys.exc_info()[1]); # retrieve the error message returned by database
         return False
@@ -156,11 +159,12 @@ def job_return(client_digest,jobid,success,eventmsg=None):
     print "The job has returned. It was a %s\n" % success
     clientid = get_clientid(client_digest)
     if debug: print "The clientid is: %s\n" % clientid
-    cursor = db.conn.Execute('''
-INSERT INTO job_history
-(jobid,clientid,status,eventmsg)
-VALUES (%s,%s,%s,%s)
-''', (jobid,clientid,success,eventmsg))
+    # Commented because is already executed on run_jobs
+#    cursor = db.conn.Execute('''
+#INSERT INTO job_history
+#(jobid,clientid,status,eventmsg)
+#VALUES (%s,%s,%s,%s)
+#''', (jobid,clientid,success,eventmsg))
     if success == "Succeeded" and eventmsg != None:
         #If we have output and succeeded, check if we have a postprocess to run.
         #FIXME this should be a separate function.
@@ -205,52 +209,25 @@ def get_jstatus(client_digest,jobid):
     db = ScireDB()
     clientid = get_clientid(client_digest)
 
-    job = get_job(client_digest,jobid)
-
-    if job['run_interval'] > 0:
-        '''
-        In this case, the job is recurring. So,
-        we need to find the most recent jobid that
-        has been run for this job. Then change the
-        jobid we query to be the most recent duped
-        jobid, not the original jobid.
-        '''
-        maxjob = db.conn.GetRow('''
-SELECT MAX(jobs.jobid) FROM jobs,job_history
-WHERE origjob=%s AND jobs.jobid=job_history.jobid
-''',jobid)
-        jobid = maxjob[0]
-        print 'Changing jobid to %s' % str(jobid)
-
-    s_row = db.conn.GetRow('''
-SELECT * FROM job_history
-WHERE jobid=%s
-AND clientid=%s
-''', (jobid,clientid))
-
-    db.Close()
-
-    if s_row:
-        return s_row[3]
+    # Under the current scheduling model, all jobs are treated as
+    # recurring, so just get the latest job entry
+    jobstatus = db.conn.GetRow('''
+SELECT jobs_status.statusname
+FROM job_history LEFT JOIN jobs_status ON (job_history.statusid)
+WHERE job_history.clientid = %s AND job_history.jobid = %s
+ORDER BY job_history.eventtime DESC
+LIMIT 1
+''', (clientid, jobid))
+
+    if jobstatus:
+        return jobstatus
     else:
         return False
 
-def get_job(client_digest,jobid,dup=False,origjob=False):
+def get_job(client_digest,jobid):
     '''
-    If left to the default values for dup and origjob, get_job
-    will act simply as a query, not an execution mechanism.
-
-    The dup parameter represents whether or not a recurring job is
-    intended to be executed. If true, the job data will be duplicated
-    into a unique job with a origjob value of the original jobid.
-    It defaults to the safer value of 0.
-
-    The origjob parameter affects the sql select statement used to
-    get the job. If false, the tables are joined simply on the jobid
-    field. However, if set, the tables are joined on
-    (jobs.origjob=job_conditions.jobid). Using a parameter to
-    represent this prevents the need for a two-pass query.
+    Simplified version of get_job.
     '''
 
     job = {}
@@ -259,79 +236,119 @@
     jobrow[jobid] = {}
 
     db = ScireDB()
-
-    # MATT: SetFetchMode appears to be unavailable for adodb on python.
-    # MATT: Hmmm... GetDict (and GetAssoc) seem to return an array not a
-    # dict. We could perhaps use MetaColumns to our advantage but not until
-    # we can reference the array elements by name.
-#    jobrow = db.conn.GetDict('select * from jobs where jobid='+str(jobid))
     print jobrow;
-#    print 'jobid: %d ' % jobid
-#    for key in jobrow[jobid]:
-#        job[key] = jobrow[jobid][key]
-#        print ' %s: %s' % (key,jobrow[jobid][key])
-#    scriptrow = db.conn.GetDict('select * from scripts where scriptid='+str(jobrow[5]))
-#    for key in scriptrow.keys:
-#        job['script'][key] = scriptrow[key]
-#        print ' %s: %s' % (key,job[key])
-
     try:
-        if origjob:
-            print 'Using jobs.origjob=job_conditions.jobid'
-            sql = db.conn.Execute('select * from jobs LEFT JOIN job_conditions on (jobs.origjob=job_conditions.jobid) WHERE job_conditions.jobid=%s AND jobs.jobid=%s', (str(origjob),str(jobid)))
-        else:
-            print 'Using ON JOBID select'
-            sql = db.conn.Execute('select * from jobs LEFT JOIN job_conditions using (jobid) where jobid=%s', str(jobid))
-        job = sql.GetRowAssoc(0) # 0 is lower, 1 is upper-case
+        cursor = db.conn.Execute('''
+SELECT *
+FROM jobs LEFT JOIN job_conditions on (jobs.jobid)
+WHERE jobs.jobid = %d''' % jobid)
+        job = cursor.GetRowAssoc(0) # 0 is lower, 1 is upper-case
     except:
-        print sys.exc_info()[1]; # retrieve the error message returned by database
-
-    print "job: \n"
-    print job
-    for key in job:
-        job[key] = str(job[key])
-        # For the time being, let's go ahead and cast all job data
-        # as string, just to not have to worry about marshalling
-        # problems. Hopefully we will see an improvement with
-        # allow_none functionality in future python versions.
-
-        #if type(job[key]) == datetime.datetime:
-        #    job[key] = str(job[key])
-#        print ' %s: %s' % (key,job[key])
-
+        print sys.exc_info()[1]
+    print "job: \n" + str(job)
+    tmp = {}
+    for e in job:
+        tmp [e] = str(job[e])
+    job = tmp
+
     try:
-        scriptsql = db.conn.Execute('select * from scripts where scriptid=%s',str(job['script']))
-        script = scriptsql.GetRowAssoc(0)
+        scriptsql = db.conn.Execute('select * from scripts where scriptid=%s', str(job['script']))
+        job['script'] = scriptsql.GetRowAssoc(0)
+
     except:
         print sys.exc_info()[1]; # retrieve the error message returned by database
-
-    job['script'] = script
-    job['jobid'] = jobid # Fix jobid since it's written twice from select
+
+    db.Close()
+    return job
 
-    if (job['run_interval'] > 0) and 0: #TEMPORARIALLY DISABLED B/C origjob doesn't exist.
-        if (job['origjob'] and job['origjob'] == 'None' and dup == True):
+def get_latest_job_entry(clientid, jobid):
+    """
+    Helper Function to get the latest entry on the job_history
+    table"""
+    db = ScireDB()
+    eventtime = db.conn.GetRow('''
+SELECT eventtime
+FROM job_history
+WHERE clientid = %s AND jobid = %s
+ORDER BY eventtime DESC
+LIMIT 1
+''', (clientid, jobid))
+    if eventtime:
+        return eventtime[0]
+    else:
+        return false
 
-            print 'Job %s is a recurring job.' % str(jobid)
-            cursor = db.conn.Execute('''
-            INSERT INTO jobs
-            (priority,created,creator,permission,script,description,pending,origjob)
-            VALUES (%s,now(),%s,%s,%s,%s,%s,%s)
-            ''', (job['priority'],job['creator'],job['permission'],job['script']['scriptid'],job['description'],job['pending'],jobid))
-            if not cursor:
-                print 'Error executing INSERT for recurring job %s.' % str(jobid)
-                db.Close()
-                return False
-            else:
-                insertid = cursor.Insert_ID()
-                cursor.Close()
-                db.Close()
-                print 'Recurring job duped into jobid: %s' % str(insertid)
-
-                print 'NOW CALLING get_job with Insert_ID'
-                return get_job(client_digest,insertid,False,jobid)
+def get_status_id(status):
+    """
+    Returns the jobstatus id for the job with the statusname
+    'status', false otherwise"""
+    db = ScireDB()
+    statusid = db.conn.GetRow('''
+SELECT statusid
+FROM jobs_status
+WHERE statusname = \'%s\'''' % status)
+    if statusid:
+        return statusid[0]
     else:
-        db.Close()
-        return job
+        return False
+
+def mark_job_as(client_digest, status, jobid, message = ""):
+    """
+    Modifies the entry on job_history for the lastest registred
+    job."""
+    db = ScireDB()
+    clientid = get_clientid(client_digest)
+    now = db.conn.DBTimeStamp(datetime.datetime.now())
+    statusid = get_status_id(status)
+    if not statusid:
+        print "ERROR! couldn't find %s statusid" % status
+        return False
+
+    eventtime = db.conn.DBTimeStamp(get_latest_job_entry(clientid, jobid))
+    if eventtime:
+        cursor = db.conn.Execute('''
+UPDATE job_history
+SET eventtime = %s, statusid = %d, eventmsg = \'%s\'
+WHERE eventtime = %s and jobid = %d and clientid = %d
+''' % (now, int(statusid), message, eventtime, int(jobid), int(clientid)))
+    else:
+        cursor = db.conn.Execute('''
+INSERT INTO job_history
+VALUES (%s,%s,%s,%s,%s)
+''', (jobid, clientid, now, statusid, message))
+    return True
+
+def expand_jobs(clientid):
+    """
+    Search for the group jobs that the client must be into and
+    does the expansion"""
+    db = ScireDB()
+    groups = GACL_functions.get_client_groups(db, clientid)
+    pendingid = get_status_id('Pending')
+    for groupid in groups:
+        members = GACL_functions.get_group_clients(db, groupid)
+        try :
+            cursor = db.conn.Execute('''
+SELECT DISTINCT(jobs_clients.jobid)
+FROM jobs_clients LEFT JOIN job_conditions on (jobs_clients.jobid=job_conditions.jobid)
+WHERE jobs_clients.groupid = %d
+AND (job_conditions.deploy_time < now())
+AND (job_conditions.expiration_time > now())
+AND job_conditions.last_run_date < job_conditions.deploy_time ''' % groupid)
        except:
+            print sys.exc_info()[1]
+            print "################### TERRIBLE ERROR #################"
+        db.conn.Execute('LOCK TABLES `jobs_clients` WRITE, `job_conditions` WRITE, `job_history` WRITE''')
+        while (not cursor.EOF):
+            jobid = cursor.GetRowAssoc(0)['jobid']
+            for memberid in members:
+                db.conn.Execute("INSERT INTO job_history values ('%s','%s',now(),'%s','%s')" %
+                                (jobid, memberid, pendingid, "Job Expanded"))
+                db.conn.Execute('INSERT INTO jobs_clients (jobid, clientid) values (%d,%d)' %
+                                (jobid, memberid))
+            db.conn.Execute('UPDATE `job_conditions` SET last_run_date = now() WHERE jobid = %d' % jobid)
+            cursor.MoveNext()
+        db.conn.Execute('UNLOCK TABLES')
+    return True