Search and replace with commit in bitbucket multiple repositories - bitbucket

Is there a way to search and replace some string in Bitbucket git repositories under some project using Bitbucket administration tools/API/UI? The modification should be via git commit.
The only way that pops to mind is building some script which will go over all repositories.

I had to do just such an operation. When we migrated from svn to git, we had to change the scm developerConnection in our pom.xml:
<developerConnection>scm:git:git@github.com:codehaus-plexus/plexus-interpolation.git</developerConnection>
in all our git repos from the svn developer connection to the git one. Here is the script I used:
#!/usr/bin/python
import stashy
import os
import sys
import urllib2
import json
import base64
import getpass
from git import Repo
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
from lxml import etree
import itertools
import logging
# Log with timestamp and severity so a long multi-repo run can be followed.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
# Base URL of the Bitbucket (Stash) server and the admin account used to connect.
bitbucketBaseUrl = "https://bitbucket.company.com"
bitbucketUserName = "admin"
class BitBucketRepo:
    """Lightweight value object pairing a repository name with its clone URL."""

    def __init__(self, name, repoUrl):
        # Public attributes read by cloneRepo/updatePomFile/updateRepo.
        self.name = name
        self.repoUrl = repoUrl
class CommentedTreeBuilder(ET.TreeBuilder):
    """TreeBuilder that preserves XML comments, which ElementTree drops by default.

    (The original also defined a pass-through __init__ that only called super();
    omitting it is behaviorally identical.)
    """

    def comment(self, data):
        # Represent the comment as an ordinary node whose tag is ET.Comment.
        self.start(ET.Comment, {})
        self.data(data)
        self.end(ET.Comment)
def insert(originalfile, string):
    """Prepend *string* (plus a newline) to the contents of *originalfile*.

    Used to re-add the XML declaration that ElementTree's tree.write omits.
    Fix: the original wrote a hard-coded 'pom.xml.bak' in the current working
    directory, which could clobber an unrelated file and relied on a rename
    across directories; the backup now lives next to the target file.
    """
    backup = originalfile + '.bak'
    with open(originalfile, 'r') as src:
        with open(backup, 'w') as dst:
            dst.write(string + "\n")
            dst.write(src.read())
    os.rename(backup, originalfile)
def validateScriptParameters():
    """Exit with a usage message unless exactly two CLI arguments were given."""
    if len(sys.argv) == 3:
        return
    scriptName = os.path.basename(sys.argv[0])
    sys.exit("Usage: {} [Bit Bucket Module Project key, e.g. mf for Modules - Framework] [Bit Bucket admin password]".format(scriptName))
def cloneRepo(repository):
    """Clone *repository* into ./<name> and return the GitPython Repo handle."""
    logging.info("Cloning repo [{}]".format(repository.repoUrl))
    return Repo.clone_from(repository.repoUrl, repository.name)
def updatePomFile(repository):
    """Replace the pom.xml <scm><developerConnection> with the git clone URL.

    Parses ./<repository.name>/pom.xml (keeping XML comments), swaps every
    <developerConnection> inside an <scm> block for "scm:git:<repoUrl>", and
    rewrites the file.

    Returns 0 when a developerConnection was replaced and the file written,
    1 when there is no pom.xml or nothing was replaced.
    """
    ET.register_namespace('xsi', "http://www.w3.org/2001/XMLSchema-instance")
    ET.register_namespace('', "http://maven.apache.org/POM/4.0.0")
    # Custom target keeps <!-- comments --> that ElementTree would discard.
    cparser = ET.XMLParser(target=CommentedTreeBuilder())
    resultCode = 1
    if os.path.isfile(os.getcwd() + "/" + repository.name + "/pom.xml"):
        tree = ET.parse(repository.name + "/pom.xml", parser=cparser)
        root = tree.getroot()
        ns = {'nodes': 'http://maven.apache.org/POM/4.0.0'}
        for scm in root.findall('nodes:scm', ns):
            # findall never yields None, so the original's `if scm is not None`
            # check was dead code and has been removed.
            developerConnection = scm.find('nodes:developerConnection', ns)
            if developerConnection is None:
                # Fix: the original passed None to scm.remove() and crashed
                # whenever an <scm> block had no <developerConnection>.
                continue
            scm.remove(developerConnection)
            newDeveloperConnectionElm = Element("developerConnection")
            newDeveloperConnectionElm.tail = "\n\t"
            newDeveloperConnectionElm.text = "scm:git:" + repository.repoUrl
            scm.append(newDeveloperConnectionElm)
            resultCode = 0
    if resultCode == 0:
        logging.info("Updating repository: " + repository.name)
        # NOTE: tree.write emits no XML declaration here; updateRepo re-adds it
        # via insert().
        tree.write(repository.name + "/pom.xml")
    return resultCode
def updateRepo(bitbucket, projectKey):
    """For every repository in the Bitbucket project: clone it, rewrite the
    pom.xml developerConnection to the git URL, then commit and push.

    bitbucket  -- authenticated stashy client
    projectKey -- Bitbucket project key whose repos are processed
    """
    # Declaration re-added by insert() because tree.write in updatePomFile
    # does not emit one.
    xmlVersion = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
    repoList = bitbucket.projects[projectKey].repos
    for repoItem in repoList:
        # assumes links['clone'] holds the http and ssh entries in either
        # order, ssh usually second -- TODO confirm against the REST payload
        cloneUrl = repoItem['links']['clone'][1]['href']
        if cloneUrl.startswith("http"):
            cloneUrl = repoItem['links']['clone'][0]['href']
        if not cloneUrl.startswith("ssh"):
            logging.error("Unable to retrieve valid clone url [{}], exiting...".format(cloneUrl))
            sys.exit(1)
        repository = BitBucketRepo(repoItem['name'], cloneUrl)
        clonedRepo = cloneRepo(repository)
        resultCode = updatePomFile(repository)
        # Only commit/push repos whose pom.xml was actually changed.
        if resultCode == 0:
            insert(repository.name + "/pom.xml", xmlVersion)
            clonedRepo.index.add([os.getcwd() + "/" + repository.name + "/pom.xml"])
            clonedRepo.index.commit("CM-8991: Updating pom.xml to use git connection string instead of svn")
            clonedRepo.remotes.origin.push()
# --- script entry point ---
validateScriptParameters()
logging.info('Bit Bucket URL [' + bitbucketBaseUrl + ']')
logging.info('User name [' + bitbucketUserName + ']')
# argv[1] = project key, argv[2] = admin password (validated above).
projectKey = sys.argv[1]
bitbucketPassword = sys.argv[2]
# Authenticated stashy client for the Bitbucket Server REST API.
bitbucket = stashy.connect(bitbucketBaseUrl, bitbucketUserName, bitbucketPassword)
logging.info("Module project key: [{}]".format(projectKey))
updateRepo(bitbucket, projectKey)
Note that it requires the 'stashy' python library, and is written for python2.

Related

Is there a Bitbucket API to search if a repository variable is defined in all of my workspace's repos?

Instead of defining a Bitbucket Cloud workspace variable that can be used by all the repos in the workspace, someone defined it in each repo, but not in all of them, of the workspace. Now I want to remove the variable in the individual repos, and define it in the workspace.
Is there a Bitbucket API that would do this pseudo-code?
// Pseudo-code only (from the question) -- no such Java-style Bitbucket client
// API exists; it sketches the desired behavior implemented in Python below.
def bb = Bitbucket.getInstance()
String workspace = "MyWorkspace"
String myVariable = "NEXUS_USER"
List<Repository> reposInWorkspace = bb.getWorkspace(workspace).getAllReposInWorkspace()
reposInWorkspace.each { repo ->
    if (repo.hasVariable(myVariable)) {
        println repo.name
    }
}
I put a Bitbucket support ticket, and a sharp Atlassian support person gave me this Python3 script
from requests import Session
from time import sleep
# Bitbucket Cloud credentials: the username plus an *app password* (not the
# regular account password).
username = 'your_username_not_email'
password = 'app_pw_not_bb_user_pw'
workspace = 'your_workspace'
variable_name = 'your_variable'
# Workspace repository-list endpoint (Bitbucket Cloud REST API 2.0).
URL = f'https://api.bitbucket.org/2.0/repositories/{workspace}'
# One session reused for every request, carrying Basic auth.
session = Session()
session.auth = (username, password)
def get_repos(page=None):
    """Yield the slug of every repository in the workspace, page by page.

    Retries a page indefinitely while the API answers 429 (rate limited).
    """
    while True:
        query = {'page': page, 'pagelen': 100}
        resp = session.get(URL, params=query)
        # Back off and retry the same page while rate-limited.
        while resp.status_code == 429:
            print("Hit the API rate limit. Sleeping for 10 sec...")
            sleep(10)
            print("Resuming...")
            resp = session.get(URL, params=query)
        payload = resp.json()
        for repo in payload.get('values'):
            yield repo.get('slug')
        if not payload.get('next'):
            return
        # First request used page=None (server default, page 1); continue at 2.
        if page is None:
            page = 1
        page += 1
def get_variables(repo, page=None):
    """Yield the key of every pipeline variable defined on *repo*, page by page.

    Retries a page indefinitely while the API answers 429 (rate limited).
    """
    repo_url = f'{URL}/{repo}/pipelines_config/variables/'
    while True:
        params = {'page': page, 'pagelen': 100}
        r = session.get(repo_url, params=params)
        while r.status_code == 429:
            print("Hit the API rate limit. Sleeping for 10 sec...")
            sleep(10)
            print("Resuming...")
            # Bug fix: the original retried with the workspace repo-list URL
            # instead of this repo's variables endpoint, so after any 429 the
            # "variables" payload was actually the repository list.
            r = session.get(repo_url, params=params)
        r_data = r.json()
        for var in r_data.get('values'):
            yield var.get('key')
        if not r_data.get('next'):
            return
        if page is None:
            page = 1
        page += 1
def has_variable(var):
    """Return True iff *var* is the variable name we are searching for.

    Fix: the original returned None (falsy but surprising) on a mismatch;
    an explicit bool is returned instead. Callers only test truthiness,
    so behavior is unchanged for them.
    """
    return var == variable_name
def main():
    """Print the slug of every repo that defines the target pipeline variable."""
    for slug in get_repos():
        for key in get_variables(slug):
            if has_variable(key):
                print(f'{slug}')


if __name__ == '__main__':
    main()

Jenkins - How to run job only if another job failed for n-times

I want to trigger/start a job (jobB) only if another job (jobA) has failed n times since its last success.
I looked at the Parameterized Trigger plugin, but its triggers only let you say "Failed" -- you can't make the trigger depend on a count of consecutive failures.
Thanks
Chris
here my groovy script thats solved the issue. Used groovy-postbuild plugin to exec the script on jobA. Thanks Ian W for your input.
import hudson.model.*
import jenkins.model.Jenkins

// Groovy-postbuild step on jobA: schedule jobB once jobA has accumulated
// `triggerThreshold` builds since its last successful build.
job_name = "jobA"
job_name_to_run = "jobB"
triggerThreshold = 2
last_succ_num = 0
last_job_num = 0
// The build this post-build script is running in (used as the upstream cause).
def currentBuild = Thread.currentThread().executable
def job = Hudson.instance.getJob(job_name)
def job_data = Jenkins.instance.getItemByFullName(job.fullName)
println 'Job: ' + job_data.fullName
if (job_data.getLastBuild()) {
    last_job_num = job_data.getLastBuild().getNumber()
}
println 'last_job_num: ' + last_job_num
if (job_data.getLastSuccessfulBuild()) {
    last_succ_num = job_data.getLastSuccessfulBuild().getNumber()
}
println 'last_succ_num: ' + last_succ_num
// assumes build numbers since the last success are consecutive and all
// non-successful -- TODO confirm (aborted/unstable builds also count here)
doRunJob =(last_job_num - last_succ_num >= triggerThreshold)
println 'do run job? ' + doRunJob
if (doRunJob){
    def jobToRun = Hudson.instance.getJob(job_name_to_run)
    // Record jobA as the upstream cause so jobB's build shows why it ran.
    def cause = new Cause.UpstreamCause(currentBuild)
    def causeAction = new hudson.model.CauseAction(cause)
    Hudson.instance.queue.schedule(jobToRun, 0, causeAction)
}

List Index Out of Range - Tweepy/Twitter API into Geodatabase

Soooo I have been working on a script I took from ArcGIS Blueprints:
http://file.allitebooks.com/20151230/ArcGIS%20Blueprints.pdf
It should convert geolocated tweets into a geodatabase. I have the Twitter Streaming API already operational, and I have been playing with different ways to extract x/y, but I keep coming back to this script, every so often, hoping I can get it running — with no luck. I am stuck on a "List Index Out of Range" error. If anyone is gracious enough to offer some ideas on how I can get past this error I will be forever grateful. If nothing else this endeavor has exposed my shortcomings with Python and Arcpy, and hopefully it will round me out in the long run. For right now, I sure would like to get some mileage out of this script and the work I've invested into it. Thank you!
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import arcpy
import sys
import time
consumer_key = 'xxx'
consumer_secret = 'xxx'
token_key = 'xxx'
token_secret = 'xxx'
class StdOutListener(StreamListener):
    """Tweepy stream listener that inserts geolocated tweets into a feature class.

    start_time   -- epoch seconds when monitoring began
    featureClass -- name of the target point feature class
    time_limit   -- how long to keep listening, in seconds
    """

    def __init__(self, start_time, featureClass, time_limit):
        super(StdOutListener, self).__init__()
        self.time = start_time
        self.limit = time_limit
        self.featureClass = featureClass

    def on_status(self, status):
        # Stop handling tweets once the monitoring window has elapsed.
        # (The original 'while' executed at most once; an 'if' is equivalent.)
        if (time.time() - self.time) >= self.limit:
            return
        if status.geo is not None:
            listCoords = status.geo['coordinates']  # assumes [lat, lon] -- TODO confirm
            latitude = listCoords[0]
            longitude = listCoords[1]
            # Fixes: the original line had an unbalanced ')' (syntax error) and
            # used "SHAPE#XY"; the arcpy geometry token is "SHAPE@XY".
            cursor = arcpy.da.InsertCursor(self.featureClass, "SHAPE@XY")
            cursor.insertRow([(longitude, latitude)])
            del cursor  # release the cursor's lock on the feature class
            print(str(listCoords[0]) + "," + str(listCoords[1]))
            return True
        else:
            print("No coordinates found")
            return True
start_time = time.time()
# Fix: the original line ended with a stray extra quote (syntax error); a raw
# string keeps the backslashes in the Windows path literal.
arcpy.env.workspace = r"c:\ArcGIS_Blueprint_Python\data\Twitter\TweetInformation.gdb"


def main():
    """Create the output feature class and stream tweets into it.

    argv[1] -- feature class name; argv[2] -- monitoring time in hours.
    """
    try:
        featureClass = sys.argv[1]
        monitorTime = sys.argv[2]
        # Fix: argv values are strings; the original 'monitorTime * 3600'
        # repeated the string 3600 times instead of computing seconds.
        monitorTime = int(monitorTime) * 3600
        sr = arcpy.SpatialReference(4326)  # WGS84, matching tweet lat/lon
        arcpy.env.overwriteOutput = True
        arcpy.CreateFeatureclass_management(arcpy.env.workspace,
                                            featureClass, "POINT", spatial_reference=sr)
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(token_key, token_secret)
        stream = Stream(auth, StdOutListener(start_time, featureClass,
                                             time_limit=monitorTime))  # 172800
        stream.filter(track=['car'])
    except IndexError:
        # This is the asker's "list index out of range": the script was run
        # without the two required command-line arguments.
        print("Usage: <script> <featureClass> <monitorHours>")
    except Exception as e:
        # Fix: e.message is deprecated (gone in Python 3); print the exception.
        print(e)


if __name__ == '__main__':
    main()

How do I use the Bitly API to shorten a list of URLs?

I have an account with Bitly which personalizes my URL shortening. How can I use the API to sign in and shorten a list of URLs?
Here is my solution in python using python requests library
import base64
import requests
import json
# Bitly HTTP Basic credentials in 'user:password' form, and the URLs to shorten.
credentials = 'USERNAME:PASSWORD'
urls = ['www.google.com', 'www.google.co.uk', 'www.google.fr']
def getShortURLs(urls):
    """Authenticate against Bitly once, then shorten every URL in *urls*."""
    access_token = auth()
    return shortenURLs(access_token, urls)
def auth():
    """POST Basic-auth credentials to Bitly and return the access-token body.

    Network call; resp.content is the raw token (bytes on Python 3, str on 2).
    """
    base_auth = "https://api-ssl.bitly.com/oauth/access_token"
    # Fix: base64.b64encode requires bytes on Python 3; encoding then decoding
    # keeps the header a text string on both Python 2 and 3.
    encoded = base64.b64encode(credentials.encode('ascii')).decode('ascii')
    headers = {'Authorization': 'Basic ' + encoded}
    resp = requests.post(base_auth, headers=headers)
    return resp.content
def shortenURLs(token, long_urls):
    """Shorten each URL in *long_urls* via Bitly's v3 API; return the short URLs.

    Empty/falsy entries are skipped. Each URL is prefixed with 'https://'
    before being submitted.
    """
    base = 'https://api-ssl.bitly.com/v3/shorten'
    short_urls = []
    for long_url in long_urls:
        if not long_url:
            continue
        params = {'access_token': token, 'longUrl': 'https://' + long_url}
        response = requests.get(base, params=params)
        payload = json.loads(response.content)
        short_urls.append(payload['data']['url'])
    return short_urls

Running a production SPSS job on remote server from command line

We have a Windows based SPSS server say 10.20.30.40. We would like to kick off a SPSS Production job from another server 10.20.30.50.
Can we kick off the job using a batch file?
1. Create an .spj file in the SPSS Production Facility.
2. Make a .bat file to run the .spj:
"C:\Program Files\IBM\SPSS\Statistics\21\stats.exe" -production "K:\Meinzer\Production\SPJ\DashBoardInsert.spj"
3. Create a 'scheduled task' in Windows.
The real issue is getting your output from the job. For that, I use Python.
I use syntax like this
begin program.
# Days a job may go stale before the dashboard CSV should raise an alert.
AlertDays=4
# SPSS syntax jobs to run, in order. NOTE(review): the literals mix \ and /
# separators; the runner's init_Vars repairs escape damage (\a, \b, \v, \f)
# these backslashes cause -- confirm the paths against the files on disk.
Files=['k:\meinzer/Production\dashboarddatasets/aod_network_report.sps',
'k:\meinzer/Production\push/aod_network_reportpush.sps',
'k:\meinzer/Production\pushproduction/aod_network_reportpushP.sps']
end program.
insert file='k:/meinzer/production/ps/errorTestPickles.sps'.
to trigger this
*still needs error info passed.
set mprint=off /printback=on.
begin program.
# Alternative job lists used when testing this runner -- leave commented out.
#test files to observe - uncomment out 8 or 9
#Files=['k:\meinzer/Production\dashboarddatasets/test.sps']
#Files=['k:\meinzer/Production\dashboarddatasets/testfail.sps']
#Files=['k:\meinzer/Production\dashboarddatasets/clinfo.sps']
#Files=['k:\meinzer/Production\dashboarddatasets/CSOC_Consecutive_High_Level_Svcs.sps']
import shutil
import spss
import re, os, pickle
from datetime import datetime
def main(Files):
    """Run each SPSS syntax file in *Files*, logging success/failure per job.

    For every job: open a fresh SPSS output document, submit its commands one
    by one, export the output, and record the result via send_result. Stops
    at the first failing job (break).
    """
    try:
        for FilePath in Files:
            Start = datetime.now().replace( microsecond=0)
            DBname, init_Syntax = init_Vars(FilePath)
            cmds = init_cmds(init_Syntax)
            # Keep the last three submitted commands for failure reporting.
            cmd=''
            cmd2=''
            cmd3=''
            try:
                for cmd in cmds:
                    # Normalize line endings and tabs before submitting.
                    cmd=cmd.replace('\r\n','\n ')
                    cmd=cmd.replace('\t',' ')
                    print cmd
                    spss.Submit(cmd)
                    cmd3=cmd2
                    cmd2=cmd
#                    cmd, cmd2, cmd3=run_cmds(cmd,cmd2,cmd3,cmds)
                Finish = datetime.now().replace( microsecond=0)
                spss_Output(DBname)
                # True when the job's output shows SPSS 'Warning # 5334'
                # (dataset saved under a new name).
                SavedNewname=check_saved_new_name(DBname)
                if SavedNewname==1:
                    # NOTE(review): cmd is passed where send_result expects
                    # AlertDays -- verify this call's argument order.
                    send_result(DBname,'Failure',Start,Finish,0,cmd,cmd2,cmd3)
                    break
                if SavedNewname==0:
                    send_result(DBname,'Success',Start,Finish,1,AlertDays)
            except Exception,e:
                Finish = datetime.now().replace( microsecond=0)
                errorLevel, errorMsg = get_spss_error(e)
                send_result(DBname,"Failure in code",Start,Finish,0,AlertDays,cmd,cmd2,cmd3,errorLevel, errorMsg )
                spss_Output(DBname)
                break
    except IOError:
        print "can't open file or difficulty initializing comands from spss"
        # NOTE(review): this call misses required send_result parameters and
        # DBname/Start/Finish may be unbound if the first open failed -- it
        # would raise rather than report. Confirm intended behavior.
        send_result('Can not open File %s' % DBname,Start,Finish)
        spss_Output(DBname)
def init_Vars(FilePath):
    """Normalize *FilePath*, derive the job's output-document name, and open a
    fresh SPSS output document for it.

    Returns (DBname, init_Syntax): the file's base name and the normalized
    path to the syntax file.
    """
    # The Windows-style literals in Files contain backslash escapes Python has
    # already collapsed (e.g. '\a' -> BEL); string-escape re-escapes them...
    FilePath=FilePath.encode('string-escape')
    #FilePath= map(os.path.normpath, FilePath)
    FilePath=FilePath.replace('\\','/')
    # ...then these replacements restore the characters the escapes destroyed:
    FilePath=FilePath.replace('/x07','/a')   # BEL  back to '\a...' path segment
    FilePath=FilePath.replace('//','/')
    FilePath=FilePath.replace('/x08','/b')   # BS   back to '\b...'
    FilePath=FilePath.replace('/x0b','/v')   # VT   back to '\v...'
    # NOTE(review): \x0c is form feed (\f) but is mapped to '/v' here --
    # probably meant '/f'; confirm no job path contains '\f...'.
    FilePath=FilePath.replace('/x0c','/v')
    print 'this is the file path..................... '+FilePath
    DBname = os.path.split(os.path.normpath(FilePath))[-1]
    #if '\\' in FilePath:
    #    DBname=FilePath.rpartition('\\')[-1]
    #if '/' in FilePath:
    #    DBname=FilePath.rpartition('/')[-1]
    init_Syntax=FilePath
    # Close any output document named after this job, then open a clean one so
    # spss_Output later captures only this run.
    OutputClose="output close name=%s." % DBname
    OutputNew="output new name=%s." % DBname
    spss.Submit(OutputClose)
    spss.Submit(OutputNew)
    return (DBname, init_Syntax)
def init_cmds(init_Syntax):
    """Read the syntax file and split it into individual SPSS commands.

    A command is assumed to end with '.' at end of line; commands starting
    with '*' (SPSS comments) are dropped.
    """
    with open(init_Syntax,'rb') as f:
        BOM_UTF8 = "\xef\xbb\xbf"
        # Python 2: bytes == str, so lstrip of the BOM string works here.
        # NOTE(review): lstrip strips any of these leading *characters*, not
        # the exact prefix -- harmless for a single BOM, but not a prefix test.
        code = f.read().lstrip(BOM_UTF8)
    #r = re.compile('(?<=\.)\s*?^\s*',re.M)
    # Split after a command-terminating '.' and trim surrounding whitespace.
    r = re.compile('(?<=\.)\s*?^\s*|\s*\Z|\A\s*',re.M)
    cmds = r.split(code)
    #cmds = re.split("(?<=\\.)%s[ \t]*" % os.linesep, code, flags=re.M)
    #cmds = re.split(r'(?<=\.)[ \t]*%s' % os.linesep, code, flags=re.M)
    cmds = [cmdx.lstrip() for cmdx in cmds if not cmdx.startswith("*")]
    return cmds
def run_cmds(cmd,cmd2,cmd3,cmds):
for cmd in cmds:
cmd=cmd.replace('\r\n','\n ')
cmd=cmd.replace('\t',' ')
print cmd
spss.Submit(cmd)
cmd3=cmd2
cmd2=cmd
return (cmd, cmd2, cmd3)
def send_result(DBname,result,Start,Finish,status,AlertDays,cmd='',cmd2='',cmd3='',errorLevel='', errorMsg=''):
    """Prepend the job outcome to the error log and the dashboard CSV.

    status: 1 = success, 0 = failure. On failure, the last three submitted
    commands (cmd newest) plus the SPSS error level/message are logged too.
    """
    print result + ' was sent for '+DBname
    FinishText = Finish.strftime("%m-%d-%y %H:%M")
    StartText = Start.strftime("%m-%d-%y %H:%M")
    # str(timedelta) truncated to its first 7 chars (H:MM:SS).
    Runtimex = str(Finish-Start)[0:7]
    error_result="""%s %s
Start Finish Runtime Hrs:Min:Sec
%s %s %s """ % (DBname,result,StartText,FinishText,Runtimex)
    error_result_email="""%s <br>
%s <br> Runtime %s <br>\n""" % (result,DBname,Runtimex)
    # Read the whole log, then rewrite it with the newest entry first.
    with open("k:/meinzer/production/output/Error Log.txt", "r+") as myfile:
        old=myfile.read()
        myfile.seek(0)
        if status==1:
            myfile.write(error_result+"\n\n"+ old)
        if status==0:
            myfile.write(error_result+'\n'+'This was the problem\n'+errorLevel+" "+ errorMsg+'\n'+cmd3+'\n'+cmd2+'\n'+cmd+"\n\n"+ old)
    # with open("k:/meinzer/production/output/email Log.txt", "r+") as emailtext:
    #     olde=emailtext.read()
    #     emailtext.seek(0)
    #     emailtext.write(error_result_email+ olde)
    # Newest-first CSV consumed by the dashboard: name,status,finish,0,AlertDays
    with open("k:/meinzer/production/output/ErrorCSV.txt", "r+") as ErrorCSV:
        oldcsv=ErrorCSV.read()
        ErrorCSV.seek(0)
        ErrorCSV.write(DBname+','+str(status)+','+FinishText+",0"+','+str(AlertDays)+"\n"+ oldcsv)
def check_saved_new_name(DBname):
    """Return True if the job's exported text output contains SPSS
    'Warning # 5334' (dataset saved under a new name), else False.

    Fix: the original opened the file twice -- once via `with` (handle unused)
    and once inline without closing it; a single managed read replaces both.
    """
    path = "k:/meinzer/production/output/text/" + DBname + ".txt"
    with open(path, "r") as searchfile:
        SavedNewname = 'Warning # 5334' in searchfile.read()
    return SavedNewname
def get_spss_error(e):
    """Return (errorLevel, errorMsg) for the most recent failed spss.Submit.

    e -- the exception raised by spss.Submit (echoed for the console log).
    """
    print 'Error', e
    # GetLastErrorLevel/Message report the last submitted command's error state.
    errorLevel=str(spss.GetLastErrorLevel())
    errorMsg=spss.GetLastErrorMessage()
    return (errorLevel, errorMsg)
def spss_Output(DBname):
    """Export the job's Viewer output: .spv archive plus a plain-text dump
    (the text file is what check_saved_new_name scans for warnings)."""
    exportTextCmd = "output export /text documentfile='k:/meinzer/production/output/text/%s.txt'." % DBname
    saveSpvCmd = "output save outfile='k:/meinzer/production/output/%s.spv'." % DBname
    # Save the viewer document first, then export the text rendition.
    spss.Submit(saveSpvCmd)
    spss.Submit(exportTextCmd)
# Entry point: run every job in Files (defined in the first begin-program block).
main(Files)
end program.

Resources