I have to create a .xls file from the data displayed in a table on my page. This happens when the user clicks the 'export' button. I have the following code for it, and the file is created fine. Now I want to open this file in the same click. How should I open it for the user to see?
string filePath = "C:/Upload/Stats_" + viewModel.SelectedTest.ToString() + ".xls";
//write the header
using (var pw = new StreamWriter(filePath, true))
{
    pw.WriteLine(String.Format("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}", "Month", "Total Users", "K",
        "T", "G", "Detail", "GS",
        "BI", "GHF", "A"));
    //write to the file
    for (int i = 0; i < 12; i++)
    {
        pw.WriteLine(
            String.Format("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}", viewModel.Months[i],
                viewModel.M[i], viewModel.MKau[i],
                viewModel.MTech[i], viewModel.MGew[i],
                viewModel.MDet[i], viewModel.MGes[i],
                viewModel.MBea[i], viewModel.MGesHf[i], viewModel.MAug[i]));
        pw.Flush();
    }
    pw.Close();
}
Here I would like to open it.
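Roughly speaking, what I am after is something along these lines (just a sketch of the intent - the action name and view model type here are placeholders, not real code):

// Sketch only: after writing the file, return it from the action so the
// browser shows its open/save prompt. Names here are placeholders.
public ActionResult Export(StatsViewModel viewModel)
{
    string filePath = "C:/Upload/Stats_" + viewModel.SelectedTest.ToString() + ".xls";

    // ... write the header and rows exactly as above ...

    return File(filePath, "application/vnd.ms-excel",
                System.IO.Path.GetFileName(filePath));
}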
I had the same 'requirement' to be able to export to xls; instead I gave the client an export to CSV, which will open in Excel on a machine with Excel installed but is also usable on other systems. I achieved it like this.
Create an extension method that supports .AsCsv(), taken largely from Mike Hadlow's implementation:
public static class EnumerableExtensions
{
    public static string AsCsv<T>(this IEnumerable<T> items) where T : class
    {
        var csvBuilder = new StringBuilder();
        var properties = typeof(T).GetProperties();
        foreach (T item in items)
        {
            string line = string.Join(",", properties.Select(p => p.GetValue(item, null).ToCsvValue()).ToArray());
            csvBuilder.AppendLine(line);
        }
        return csvBuilder.ToString();
    }

    private static string ToCsvValue<T>(this T item)
    {
        if (item is string)
        {
            // escape embedded quotes by doubling them (standard CSV escaping)
            return string.Format("\"{0}\"", item.ToString().Replace("\"", "\"\""));
        }
        double dummy;
        if (double.TryParse(item.ToString(), out dummy))
        {
            return string.Format("{0}", item.ToString());
        }
        return string.Format("\"{0}\"", item.ToString());
    }
}
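For illustration, calling it looks roughly like this (the Person type is just a made-up example, not part of the original code):

// Hypothetical example type, only to show what AsCsv produces.
public class Person
{
    public string Name { get; set; }
    public int Age { get; set; }
}

var people = new List<Person>
{
    new Person { Name = "Ann", Age = 34 },
    new Person { Name = "Bob", Age = 41 }
};
string csv = people.AsCsv();
// csv now contains:
// "Ann",34
// "Bob",41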
Then you would need to set your controller method to return a FileContentResult, with some logic like this within the controller method:
var outputModel = viewModel.ToList().Select(model => new
{
    Months = model.Months,
    M = model.M,
    MKau = model.MKau,
    MTech = model.MTech,
    MGew = model.MGew,
    MDet = model.MDet,
    MGes = model.MGes,
    MBea = model.MBea,
    MGesHf = model.MGesHf
});
string csv = "\"Month\",\"Total Users\",\"K\",\"T\",\"G\",\"Detail\",\"GS\",\"BI\",\"GHF\",\"A\""
    + System.Environment.NewLine
    + outputModel.AsCsv();
string fileName = "Stats_" + viewModel.SelectedTest.ToString() + ".csv";
return this.File(new System.Text.UTF8Encoding().GetBytes(csv), "text/csv", fileName);
OK, this sounds dumb even to me, but I am clearly having a "bad brain day" and need help.
On a button click, I want to generate a file based on parameters taken from 2 ViewData fields and a checkbox control, and have the file be downloaded/displayed, just as you get with a fixed link.
Most of it is going fine: the controller method passes back a file like so: Return File(filePath, "text/csv") - but then what? How do I connect that to the button and have the file download/open dialog come up?
I feel I am missing something really simple. Just calling the controller via Ajax seems to do nothing - the code is called, but I see no results.
EDIT:
The following gets me a file automatically downloaded, but with the name "download" - I want to offer the user the choice to open or download, and to set the filename - how do I do that?
serialize = function (obj) {
    var str = [];
    for (var p in obj)
        if (obj.hasOwnProperty(p)) {
            str.push(encodeURIComponent(p) + "=" + encodeURIComponent(obj[p]));
        }
    return str.join("&");
};

var params = {
    ID1: '@ViewData("ID1")',
    ID2: '@ViewData("ID2")',
    flag: getFlag()
};

var actionUrl = ('@Url.Action("ProduceReport", "Report")');
actionUrl += "/?" + serialize(params);
window.open(actionUrl);
2nd EDIT:
Controller code - this produces a file and returns the path. After the call to ProduceReport, the file is there on that path; I have checked this. It is used in production to email the file (that works fine).
public FileResult ProduceReport(int ID1, int ID2, bool flag = false)
{
    var filePath = ExcelReports.ProduceReportExcel(Models.UserInfo.GetCurrentUserID, ID1, ID2, flag);
    return File(filePath, "application/vnd.ms-excel");
}
This is the acceptable compromise I found, to at least get the file downloaded. I will have to do further research to see if I can print it automatically.
Javascript:
serialize = function (obj) {
    var str = [];
    for (var p in obj)
        if (obj.hasOwnProperty(p)) {
            str.push(encodeURIComponent(p) + "=" + encodeURIComponent(obj[p]));
        }
    return str.join("&");
};

var params = {
    ID1: '@ViewData("ID1")',
    ID2: '@ViewData("ID2")',
    flag: getFlag()
};

var actionUrl = ('@Url.Action("ProduceReport", "Report")');
actionUrl += "/?" + serialize(params);
window.location.href = actionUrl;
Controller: note the third parameter on the File() call - it sets the download filename via the Content-Disposition header.
public FileResult ProduceReport(int ID1, int ID2, bool flag = false)
{
    var filePath = ExcelReports.ProduceReportExcel(Models.UserInfo.GetCurrentUserID, ID1, ID2, flag);
    return File(filePath, "application/vnd.ms-excel", System.IO.Path.GetFileName(filePath));
}
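Regarding the open-or-download choice: that is ultimately up to the browser and the user's settings, but you can hint at it with the Content-Disposition header. A rough, untested variation of the same action (everything else unchanged):

public FileResult ProduceReportInline(int ID1, int ID2, bool flag = false)
{
    var filePath = ExcelReports.ProduceReportExcel(Models.UserInfo.GetCurrentUserID, ID1, ID2, flag);

    // "inline" suggests the browser open the file in place if it knows how,
    // whereas passing a download name to File() emits "attachment" and forces a download.
    Response.AppendHeader("Content-Disposition",
        "inline; filename=" + System.IO.Path.GetFileName(filePath));
    return File(filePath, "application/vnd.ms-excel");
}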
I created a simple action to download some content as an Excel file:
public FileResult ExportToExcel()
{
    string filename = "list.xlsx";
    string contentType = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";

    List<string[]> list = new List<string[]>();
    list.Add(new[] { "col1", "col2", "cols3" });
    list.Add(new[] { "col4", "col5", "cols6" });
    list.Add(new[] { "col7", "col8", "cols9" });

    StringWriter sw = new StringWriter();
    sw.WriteLine("ID,Date,Description");
    foreach (string[] item in list)
    {
        sw.WriteLine("{0},{1},{2}", item[0], item[1], item[2]);
    }

    byte[] fileContents = Encoding.UTF8.GetBytes(sw.ToString());
    return this.File(fileContents, contentType, filename);
}
I have 2 issues with it:
1. The file is downloaded, but I cannot open it; I get a warning:
Excel cannot open the file ... because the file format or file extension is not valid. Verify that the file has not been corrupted and that the file extension matches the format of the file.
When I use the old Excel format:
string filename = "List.xls";
string contentType = "application/vnd.ms-excel";
I am able to open the file, but only after three different warnings about the file being corrupted, etc.
By the way, for comparison I also tried writing the file as a PDF:
string filename = "List.pdf";
string contentType = "application/pdf";
and I still couldn't open the file - it said the format is not valid, etc.
2. In the second example the contents do appear in the file; however, the commas are not recognised as column separators, and all the data in a row ends up in a single column.
What separator should I use for the Excel format, or how should I write the data to the file so it comes out as a proper Excel table?
The ideal solution for me would be to simply return an exported (strongly typed) view, but I haven't found out how to do that so far.
--- EDIT: Working solution ---
public FileResult ExportToExcel()
{
    string filename = "List.xlsx";
    string contentType = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";

    List<string[]> titles = new List<string[]>() { new[] { "a", "be", "ce" } };
    List<string[]> list = new List<string[]>
    {
        new[] { "col1", "col2", "cols3" },
        new[] { "col4", "col5", "cols6" },
        new[] { "col7", "col8", "cols9" },
        new[] { "col10", "col11", "cols12" }
    };

    XLWorkbook wb = new XLWorkbook();
    XLTables xt = new XLTables();
    var ws = wb.Worksheets.Add("List");

    ws.Cell(1, 1).InsertData(titles);
    ws.Cell(2, 1).InsertData(list);
    ws.Columns().AdjustToContents();

    var stream = new MemoryStream();
    wb.SaveAs(stream);
    stream.Seek(0, SeekOrigin.Begin);
    wb.Dispose();

    return this.File(stream, contentType, filename);
}
The reason it is not being rendered correctly is that you cannot just return the MIME type and expect the framework to figure out the rest: you are writing plain comma-separated text but labelling it as an .xlsx workbook, so Excel rejects it.
I would go with a NuGet package called ClosedXML, which lets you create a real Excel file in memory and stream it back to the client.
It comes with full documentation (here) if you need more information.
Using this package you can do something like:
XLWorkbook wb = new XLWorkbook();
XLTables xt = new XLTables();
var ws = wb.Worksheets.Add("Sheet 1");

var firstCell = ws.Cell(1, 1);
var lastCell = ws.Cell(3, list.Count);
var table = ws.Range(firstCell.Address, lastCell.Address).AsTable();
table.Cell(2, 1).InsertData(list);
table.CreateTable();
ws.Columns().AdjustToContents();

using (var stream = new MemoryStream())
{
    wb.SaveAs(stream);
    wb.Dispose();
    // return the buffered bytes rather than the stream itself, since the using
    // block disposes the stream before MVC gets a chance to write the response
    return File(stream.ToArray(), contentType, filename);
}
I am making a crawler application in Groovy on Grails. I am using Crawler4j and following this tutorial.
I created a new grails project
Put the BasicCrawlController.groovy file in controllers->package
Did not create any view, because I expected that on doing run-app my crawled data would appear in the crawlStorageFolder (please correct me if my understanding is flawed)
After that I just ran the application with run-app, but I didn't see any crawled data anywhere.
Am I right in expecting some file to be created at the crawlStorageFolder location that I have given as C:/crawl/crawler4jStorage?
Do I need to create any view for this?
If I want to invoke this crawler controller from some other view on click of a submit button of a form, can I just write <g:form name="submitWebsite" url="[controller:'BasicCrawlController ']">?
I ask this because I do not have any method in this controller, so is this the right way to invoke it?
My code is as follows:
//All necessary imports
public class BasicCrawlController {
static main(args) throws Exception {
String crawlStorageFolder = "C:/crawl/crawler4jStorage";
int numberOfCrawlers = 1;
//int maxDepthOfCrawling = -1; default
CrawlConfig config = new CrawlConfig();
config.setCrawlStorageFolder(crawlStorageFolder);
config.setPolitenessDelay(1000);
config.setMaxPagesToFetch(100);
config.setResumableCrawling(false);
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
controller.addSeed("http://en.wikipedia.org/wiki/Web_crawler")
controller.start(BasicCrawler.class, 1);
}
}
class BasicCrawler extends WebCrawler {
final static Pattern FILTERS = Pattern
.compile(".*(\\.(css|js|bmp|gif|jpe?g"+ "|png|tiff?|mid|mp2|mp3|mp4" +
"|wav|avi|mov|mpeg|ram|m4v|pdf" +"|rm|smil|wmv|swf|wma|zip|rar|gz))\$")
/**
* You should implement this function to specify whether the given url
* should be crawled or not (based on your crawling logic).
*/
@Override
boolean shouldVisit(WebURL url) {
String href = url.getURL().toLowerCase()
!FILTERS.matcher(href).matches() && href.startsWith("http://en.wikipedia.org/wiki/Web_crawler/")
}
/**
* This function is called when a page is fetched and ready to be processed
* by your program.
*/
@Override
void visit(Page page) {
int docid = page.getWebURL().getDocid()
String url = page.getWebURL().getURL()
String domain = page.getWebURL().getDomain()
String path = page.getWebURL().getPath()
String subDomain = page.getWebURL().getSubDomain()
String parentUrl = page.getWebURL().getParentUrl()
String anchor = page.getWebURL().getAnchor()
println("Docid: ${docid} ")
println("URL: ${url} ")
println("Domain: '${domain}'")
println("Sub-domain: ' ${subDomain}'")
println("Path: '${path}'")
println("Parent page:${parentUrl} ")
println("Anchor text: ${anchor} " )
if (page.getParseData() instanceof HtmlParseData) {
HtmlParseData htmlParseData = (HtmlParseData) page.getParseData()
String text = htmlParseData.getText()
String html = htmlParseData.getHtml()
List<WebURL> links = htmlParseData.getOutgoingUrls()
println("Text length: " + text.length())
println("Html length: " + html.length())
println("Number of outgoing links: " + links.size())
}
Header[] responseHeaders = page.getFetchResponseHeaders()
if (responseHeaders != null) {
println("Response headers:")
for (Header header : responseHeaders) {
println("\t ${header.getName()} : ${header.getValue()}")
}
}
println("=============")
}
}
I'll try to translate your code into the Grails standard.
Use this under grails-app/controllers:
class BasicCrawlController {
def index() {
String crawlStorageFolder = "C:/crawl/crawler4jStorage";
int numberOfCrawlers = 1;
//int maxDepthOfCrawling = -1; default
CrawlConfig crawlConfig = new CrawlConfig();
crawlConfig.setCrawlStorageFolder(crawlStorageFolder);
crawlConfig.setPolitenessDelay(1000);
crawlConfig.setMaxPagesToFetch(100);
crawlConfig.setResumableCrawling(false);
PageFetcher pageFetcher = new PageFetcher(crawlConfig);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(crawlConfig, pageFetcher, robotstxtServer);
controller.addSeed("http://en.wikipedia.org/wiki/Web_crawler")
controller.start(BasicCrawler.class, 1);
render "done crawling"
}
}
Use this under src/groovy
class BasicCrawler extends WebCrawler {
final static Pattern FILTERS = Pattern
.compile(".*(\\.(css|js|bmp|gif|jpe?g"+ "|png|tiff?|mid|mp2|mp3|mp4" +
"|wav|avi|mov|mpeg|ram|m4v|pdf" +"|rm|smil|wmv|swf|wma|zip|rar|gz))\$")
/**
* You should implement this function to specify whether the given url
* should be crawled or not (based on your crawling logic).
*/
@Override
boolean shouldVisit(WebURL url) {
String href = url.getURL().toLowerCase()
!FILTERS.matcher(href).matches() && href.startsWith("http://en.wikipedia.org/wiki/Web_crawler/")
}
/**
* This function is called when a page is fetched and ready to be processed
* by your program.
*/
@Override
void visit(Page page) {
int docid = page.getWebURL().getDocid()
String url = page.getWebURL().getURL()
String domain = page.getWebURL().getDomain()
String path = page.getWebURL().getPath()
String subDomain = page.getWebURL().getSubDomain()
String parentUrl = page.getWebURL().getParentUrl()
String anchor = page.getWebURL().getAnchor()
println("Docid: ${docid} ")
println("URL: ${url} ")
println("Domain: '${domain}'")
println("Sub-domain: ' ${subDomain}'")
println("Path: '${path}'")
println("Parent page:${parentUrl} ")
println("Anchor text: ${anchor} " )
if (page.getParseData() instanceof HtmlParseData) {
HtmlParseData htmlParseData = (HtmlParseData) page.getParseData()
String text = htmlParseData.getText()
String html = htmlParseData.getHtml()
List<WebURL> links = htmlParseData.getOutgoingUrls()
println("Text length: " + text.length())
println("Html length: " + html.length())
println("Number of outgoing links: " + links.size())
}
Header[] responseHeaders = page.getFetchResponseHeaders()
if (responseHeaders != null) {
println("Response headers:")
for (Header header : responseHeaders) {
println("\t ${header.getName()} : ${header.getValue()}")
}
}
println("=============")
}
}
I am encountering a problem in getting the download prompt. In the code below, I first allow the user to upload a file to compress. Once the file is compressed, the user should be given the compressed file. But with the code below, the download prompt doesn't appear, nor is any error shown. Please help me by correcting my code.
The view code:
function CompressFile(box) {
    var file = document.getElementById('fileComp');
    if (file.value == "") {
        alert("Choose a file to upload");
        return false;
    }
    dhtmlx.modalbox.hide(box);
    var fd = new FormData();
    fd.append('file', file.files[0]);
    var xhr = new XMLHttpRequest();
    xhr.open('POST', '/FileUpload/Compress', true);
    xhr.send(fd);
}
The controller code:
public ActionResult Compress(HttpPostedFileBase file)
{
    var supportedType = new[] { "pdf" };
    var fileExt = System.IO.Path.GetExtension(file.FileName).Substring(1);
    var filename = Path.GetFileNameWithoutExtension(file.FileName) ?? "";

    if (file.ContentLength > 0 && supportedType.Contains(fileExt))
    {
        string filePath = Path.Combine(HttpContext.Server.MapPath(_uploadPDF), Path.GetFileName(file.FileName));
        file.SaveAs(filePath);

        PdfReader reader = new PdfReader(filePath);
        string name = DateTime.Now.ToString("ddMM_HHmmss");
        name = Server.MapPath(_fileUploadPath + name + ".pdf");

        PdfStamper stamper = new PdfStamper(reader, new FileStream(name, FileMode.Create), PdfWriter.VERSION_1_5);
        stamper.FormFlattening = true;
        stamper.SetFullCompression();
        stamper.Close();

        string fn = System.IO.Path.GetFileName(name);
        return base.File(name, "application/pdf", fn);
    }
    else
    {
        return View();
    }
}
The problem is that you're using Ajax. You can't download a file through Ajax - the response comes back to your JavaScript, not to the browser's download handling. You need to do a regular POST (or simply navigate to the action URL, for example via window.location.href or a plain form submit) so the browser receives the file and prompts the user where to save it.
Is it possible to move a work item from one project to another inside TFS? I’ve seen a copy option, but no move. Also, if it is possible, what’s the implication for any of the WI history?
I found this article from 2008 that seems to say it's not possible, but I wondered if there'd been any progress since then.
It isn't possible to move, just copy. The way we do it is: we do the copy, link the original, then close the original as obsolete. You could also create the copy and TF Destroy the original, but then you will lose all the history.
If you wanted to, you could get very fancy and create your own "move" utility that copies the workitem and all of the history, then closes out (or destroys) the old one. Seems like overkill for something that you probably shouldn't need to do all that often.
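If you do go down that road, the TFS client object model gets you most of the way. A minimal sketch of the copy-link-close idea (the ids, state names and target project below are placeholders and depend on your process template):

// Minimal sketch, assuming an already-connected TfsTeamProjectCollection called "collection".
var store = collection.GetService<WorkItemStore>();
var original = store.GetWorkItem(1234); // placeholder work item id

// Copy into the target project's work item type of the same name;
// Copy() carries the field values over and (as far as I recall) links back to the original.
var targetType = store.Projects["TargetProject"].WorkItemTypes[original.Type.Name];
var copy = original.Copy(targetType);
copy.Save();

// Close out the original so it no longer shows up in queries.
original.History = "Copied to work item " + copy.Id + "; closing original as obsolete.";
original.State = "Closed"; // or whatever "obsolete" state your template uses
original.Save();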
Lars Wilhelmsen wrote a WorkItemMigrator -> http://larsw.codeplex.com/SourceControl/list/changesets
It's a good starting point for a utility you can customize for your needs. We used it to split off 100 or so work items to a new project. Here's the program I ended up with; modify the query to subset the items to migrate.
namespace WorkItemMigrator
{
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Text;
using Microsoft.TeamFoundation.Client;
using Microsoft.TeamFoundation.Framework.Common;
using Microsoft.TeamFoundation.Server;
using Microsoft.TeamFoundation.WorkItemTracking.Client;
class Program
{
#region Members
private static readonly Dictionary<Uri, TfsTeamProjectCollection> Collections = new Dictionary<Uri, TfsTeamProjectCollection>();
private static readonly Uri SourceCollectionUri = new Uri("http://your.domain.com:8080/tfs/DefaultCollection");
private static readonly Uri TargetCollectionUri = new Uri("http://your.domain.com:8080/tfs/DefaultCollection");
private const String Areas = "ProjectModelHierarchy";
private const string Iterations = "ProjectLifecycle";
private const string TargetProjectName = "TargetProject";
private const string MicrosoftVstsCommonStackRankFieldName = "Microsoft.VSTS.Common.StackRank";
private const string MicrosoftVstsCommonPriority = "Microsoft.VSTS.Common.Priority";
private const string TargetWorkItemType = "User Story";
private const string Wiql = "SELECT [System.Id], [System.State], [System.Title], [System.AssignedTo], [System.WorkItemType], [Microsoft.VSTS.Common.Priority], " +
"[System.IterationPath], [System.AreaPath], [System.History], [System.Description] " +
"FROM WorkItems WHERE [System.TeamProject] = 'SourceProject' AND " +
"[System.State] = 'Active' " +
"ORDER BY [System.Id]";
private static WorkItemTypeCollection WorkItemTypes;
private static Dictionary<int, int> WorkItemIdMap = new Dictionary<int, int>();
#endregion
static void Main()
{
var createAreasAndIterations = GetRunMode();
var sourceWorkItemStore = GetSourceWorkItemStore();
var sourceWorkItems = sourceWorkItemStore.Query(Wiql);
var targetWorkItemStore = GetTargetWorkItemStore();
var targetProject = targetWorkItemStore.Projects[TargetProjectName];
WorkItemTypes = targetProject.WorkItemTypes;
foreach (WorkItem sourceWorkItem in sourceWorkItems)
{
if (createAreasAndIterations)
{
Console.WriteLine();
EnsureThatStructureExists(TargetProjectName, Areas, sourceWorkItem.AreaPath.Substring(sourceWorkItem.AreaPath.IndexOf("\\") + 1));
EnsureThatStructureExists(TargetProjectName, Iterations, sourceWorkItem.IterationPath.Substring(sourceWorkItem.IterationPath.IndexOf("\\") + 1));
}
else
{
MigrateWorkItem(sourceWorkItem);
}
}
if (!createAreasAndIterations)
{
var query = from WorkItem wi in sourceWorkItems where wi.Links.Count > 0 select wi;
foreach (WorkItem sourceWorkItem in query)
{
LinkRelatedItems(targetWorkItemStore, sourceWorkItem);
}
}
TextWriter tw = File.CreateText(@"C:\temp\TFS_MigratedItems.csv");
tw.WriteLine("SourceId,TargetId");
foreach (var entry in WorkItemIdMap)
{
tw.WriteLine(entry.Key + "," + entry.Value);
}
tw.Close();
Console.WriteLine();
Console.WriteLine("Done! Have a nice day.");
Console.ReadLine();
}
private static bool GetRunMode()
{
bool createAreasAndIterations;
while (true)
{
Console.Write("Create [A]reas/Iterations or [M]igrate (Ctrl-C to quit)?: ");
var command = Console.ReadLine().ToUpper().Trim();
if (command == "A")
{
createAreasAndIterations = true;
break;
}
if (command == "M")
{
createAreasAndIterations = false;
break;
}
Console.WriteLine("Unknown command " + command + " - try again.");
}
return createAreasAndIterations;
}
private static void MigrateWorkItem(WorkItem sourceWorkItem)
{
var targetWIT = WorkItemTypes[sourceWorkItem.Type.Name];
var newWorkItem = targetWIT.NewWorkItem();
//var newWorkItem = targetWorkItemType.NewWorkItem();
// Description (Task) / Steps to reproduce (Bug)
if (sourceWorkItem.Type.Name != "Bug")
{
newWorkItem.Description = sourceWorkItem.Description;
}
else
{
newWorkItem.Fields["Microsoft.VSTS.TCM.ReproSteps"].Value = sourceWorkItem.Description;
}
// History
newWorkItem.History = sourceWorkItem.History;
// Title
newWorkItem.Title = sourceWorkItem.Title;
// Assigned To
newWorkItem.Fields[CoreField.AssignedTo].Value = sourceWorkItem.Fields[CoreField.AssignedTo].Value;
// Stack Rank - Priority
newWorkItem.Fields[MicrosoftVstsCommonPriority].Value = sourceWorkItem.Fields[MicrosoftVstsCommonPriority].Value;
// Area Path
newWorkItem.AreaPath = FormatPath(TargetProjectName, sourceWorkItem.AreaPath);
// Iteration Path
newWorkItem.IterationPath = FormatPath(TargetProjectName, sourceWorkItem.IterationPath);
// Activity
if (sourceWorkItem.Type.Name == "Task")
{
newWorkItem.Fields["Microsoft.VSTS.Common.Activity"].Value = sourceWorkItem.Fields["Microsoft.VSTS.Common.Discipline"].Value;
}
// State
//newWorkItem.State = sourceWorkItem.State;
// Reason
//newWorkItem.Reason = sourceWorkItem.Reason;
// build a usable rendition of prior revision history
RevisionCollection revisions = sourceWorkItem.Revisions;
var query = from Revision r in revisions orderby r.Fields["Changed Date"].Value descending select r;
StringBuilder sb = new StringBuilder(String.Format("Migrated from work item {0}<BR />\n", sourceWorkItem.Id));
foreach (Revision revision in query)
{
String history = (String)revision.Fields["History"].Value;
if (!String.IsNullOrEmpty(history))
{
foreach (Field f in revision.Fields)
{
if (!Object.Equals(f.Value, f.OriginalValue))
{
if (f.Name == "History")
{
string notation = string.Empty;
if (revision.Fields["State"].OriginalValue != revision.Fields["State"].Value)
{
notation = String.Format("({0} to {1})", revision.Fields["State"].OriginalValue, revision.Fields["State"].Value);
}
//Console.WriteLine("<STRONG>{0} Edited {3} by {1}</STRONG><BR />\n{2}", revision.Fields["Changed Date"].Value.ToString(), revision.Fields["Changed By"].Value.ToString(), f.Value, notation);
sb.Append(String.Format("<STRONG>{0} Edited {3} by {1}</STRONG><BR />\n{2}<BR />\n", revision.Fields["Changed Date"].Value.ToString(), revision.Fields["Changed By"].Value.ToString(), f.Value, notation));
}
}
}
//Console.WriteLine("Revision {0}: ", revision.Fields["Rev"].Value);
//Console.WriteLine(" ChangedDate: " + revision.Fields["ChangedDate"].Value);
//Console.WriteLine(" History: " + sb.ToString());
}
}
newWorkItem.History = sb.ToString();
// Attachments
for (var i = 0; i < sourceWorkItem.AttachedFileCount; i++)
{
CopyAttachment(sourceWorkItem.Attachments[i], newWorkItem);
}
// Validate before save
if (!newWorkItem.IsValid())
{
var reasons = newWorkItem.Validate();
Console.WriteLine(string.Format("Could not validate new work item (old id: {0}).", sourceWorkItem.Id));
foreach (Field reason in reasons)
{
Console.WriteLine("Field: " + reason.Name + ", Status: " + reason.Status + ", Value: " + reason.Value);
}
}
else
{
Console.Write("[" + sourceWorkItem.Id + "] " + newWorkItem.Title);
try
{
newWorkItem.Save(SaveFlags.None);
Console.ForegroundColor = ConsoleColor.Cyan;
Console.WriteLine(string.Format(" [saved: {0}]", newWorkItem.Id));
WorkItemIdMap.Add(sourceWorkItem.Id, newWorkItem.Id);
Console.ResetColor();
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
throw;
}
}
}
private static void CopyAttachment(Attachment attachment, WorkItem newWorkItem)
{
using (var client = new WebClient())
{
client.UseDefaultCredentials = true;
client.DownloadFile(attachment.Uri, attachment.Name);
var newAttachment = new Attachment(attachment.Name, attachment.Comment);
newWorkItem.Attachments.Add(newAttachment);
}
}
private static void LinkRelatedItems(WorkItemStore targetWorkItemStore, WorkItem sourceWorkItem)
{
int newId = WorkItemIdMap[sourceWorkItem.Id];
WorkItem targetItem = targetWorkItemStore.GetWorkItem(newId);
foreach (Link l in sourceWorkItem.Links)
{
if (l is RelatedLink)
{
RelatedLink sl = l as RelatedLink;
switch (sl.ArtifactLinkType.Name)
{
case "Related Workitem":
{
if (WorkItemIdMap.ContainsKey(sl.RelatedWorkItemId))
{
int RelatedWorkItemId = WorkItemIdMap[sl.RelatedWorkItemId];
RelatedLink rl = new RelatedLink(sl.LinkTypeEnd, RelatedWorkItemId);
// !!!!
// this does not work - need to check the existing links to see if one exists already for the linked workitem.
// using contains expects the same object and that's not what I'm doing here!!!!!!
//if (!targetItem.Links.Contains(rl))
// !!!!
var query = from RelatedLink qrl in targetItem.Links where qrl.RelatedWorkItemId == RelatedWorkItemId select qrl;
if (query.Count() == 0)
{
targetItem.Links.Add(rl);
// Validate before save
if (!targetItem.IsValid())
{
var reasons = targetItem.Validate();
Console.WriteLine(string.Format("Could not validate work item (old id: {0}) related link id {1}.", sourceWorkItem.Id, sl.RelatedWorkItemId));
foreach (Field reason in reasons)
{
Console.WriteLine("Field: " + reason.Name + ", Status: " + reason.Status + ", Value: " + reason.Value);
}
}
else
{
try
{
targetItem.Save(SaveFlags.None);
Console.ForegroundColor = ConsoleColor.Cyan;
Console.WriteLine(string.Format(" [Updated: {0}]", targetItem.Id));
Console.ResetColor();
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
throw;
}
}
}
}
break;
}
default:
{ break; }
}
}
}
}
private static void EnsureThatStructureExists(string projectName, string structureType, string structurePath)
{
var parts = structurePath.Split('\\');
var css = GetCommonStructureService();
var projectInfo = css.GetProjectFromName(projectName);
var parentNodeUri = GetCssStructure(GetCommonStructureService(), projectInfo.Uri, structureType).Uri;
var currentPath = FormatPath(projectName, structureType == Areas ? "Area" : "Iteration");
foreach (var part in parts)
{
currentPath = FormatPath(currentPath, part);
Console.Write(currentPath);
try
{
var currentNode = css.GetNodeFromPath(currentPath);
parentNodeUri = currentNode.Uri;
Console.ForegroundColor = ConsoleColor.DarkGreen;
Console.WriteLine(" [found]");
}
catch
{
parentNodeUri = css.CreateNode(part, parentNodeUri);
Console.ForegroundColor = ConsoleColor.Green;
Console.WriteLine(" [created]");
}
Console.ResetColor();
}
}
private static string FormatPath(string currentPath, string part)
{
part = part.Substring(part.IndexOf("\\") + 1);
currentPath = string.Format(@"{0}\{1}", currentPath, part);
return currentPath;
}
private static TfsTeamProjectCollection GetProjectCollection(Uri uri)
{
TfsTeamProjectCollection collection;
if (!Collections.TryGetValue(uri, out collection))
{
collection = TfsTeamProjectCollectionFactory.GetTeamProjectCollection(uri);
collection.Connect(ConnectOptions.IncludeServices);
collection.Authenticate();
Collections.Add(uri, collection);
}
return Collections[uri];
}
private static WorkItemStore GetSourceWorkItemStore()
{
var collection = GetProjectCollection(SourceCollectionUri);
return collection.GetService<WorkItemStore>();
}
private static WorkItemStore GetTargetWorkItemStore()
{
var collection = GetProjectCollection(TargetCollectionUri);
return collection.GetService<WorkItemStore>();
}
public static NodeInfo GetCssStructure(ICommonStructureService css, String projectUri, String structureType)
{
return css.ListStructures(projectUri).FirstOrDefault(node => node.StructureType == structureType);
}
private static ICommonStructureService GetCommonStructureService()
{
var collection = GetProjectCollection(TargetCollectionUri);
return collection.GetService<ICommonStructureService>();
}
}
}