##// END OF EJS Templates
patch: extract fs access from patchfile into fsbackend...
Patrick Mezard -
r14348:c1c71910 default
parent child Browse files
Show More
@@ -1,691 +1,691 b''
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a DSCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56 The more specific you are in your filename patterns the less you
57 57 lose speed in huge repositories.
58 58
59 59 For [keywordmaps] template mapping and expansion demonstration and
60 60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 61 available templates and filters.
62 62
63 63 Three additional date template filters are provided:
64 64
65 65 :``utcdate``: "2006/09/18 15:13:13"
66 66 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68 68
69 69 The default template mappings (view with :hg:`kwdemo -d`) can be
70 70 replaced with customized keywords and templates. Again, run
71 71 :hg:`kwdemo` to control the results of your configuration changes.
72 72
73 73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 74 to avoid storing expanded keywords in the change history.
75 75
76 76 To force expansion after enabling it, or a configuration change, run
77 77 :hg:`kwexpand`.
78 78
79 79 Expansions spanning more than one line and incremental expansions,
80 80 like CVS' $Log$, are not supported. A keyword template map "Log =
81 81 {desc}" expands to the first line of the changeset description.
82 82 '''
83 83
84 84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
85 85 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 86 from mercurial import scmutil
87 87 from mercurial.hgweb import webcommands
88 88 from mercurial.i18n import _
89 89 import os, re, shutil, tempfile
90 90
# hg kwdemo creates its own throwaway repository, so it does not require
# an existing one (commands.optionalrepo)
commands.optionalrepo += ' kwdemo'

cmdtable = {}
command = cmdutil.command(cmdtable)

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'

# names of extensions using dorecord
recordextensions = 'record'

# labels used by the color extension for kwfiles output
colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}
114 114
# date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    # zero out the timezone offset so datestr renders the date as UTC
    utcpair = (text[0], 0)
    return util.datestr(utcpair, '%Y/%m/%d %H:%M:%S')
# date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    # %1%2 render the timezone offset; local time is kept as-is
    fmt = '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)'
    return util.datestr(text, fmt)
# date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    # zero out the timezone offset so datestr renders the date as UTC
    utcpair = (text[0], 0)
    return util.datestr(utcpair, '%Y-%m-%d %H:%M:%SZ')
132 132
# register the three date filters with the template engine so they are
# usable in [keywordmaps] templates
templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible
# 'templater' is filled in by reposetup(), 'hgcmd' by uisetup()'s
# dispatch._parse wrapper
kwtools = {'templater': None, 'hgcmd': ''}
139 139
140 140 def _defaultkwmaps(ui):
141 141 '''Returns default keywordmaps according to keywordset configuration.'''
142 142 templates = {
143 143 'Revision': '{node|short}',
144 144 'Author': '{author|user}',
145 145 }
146 146 kwsets = ({
147 147 'Date': '{date|utcdate}',
148 148 'RCSfile': '{file|basename},v',
149 149 'RCSFile': '{file|basename},v', # kept for backwards compatibility
150 150 # with hg-keyword
151 151 'Source': '{root}/{file},v',
152 152 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
153 153 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
154 154 }, {
155 155 'Date': '{date|svnisodate}',
156 156 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
157 157 'LastChangedRevision': '{node|short}',
158 158 'LastChangedBy': '{author|user}',
159 159 'LastChangedDate': '{date|svnisodate}',
160 160 })
161 161 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
162 162 return templates
163 163
def _shrinktext(text, subfunc):
    '''Helper for keyword expansion removal in text.
    Depending on subfunc also returns number of substitutions.'''
    # turns "$Keyword: expansion $" back into bare "$Keyword$"; when subfunc
    # is a pattern's .subn, the (text, count) pair is passed straight through
    return subfunc(r'$\1$', text)
168 168
169 169 def _preselect(wstatus, changed):
170 170 '''Retrieves modfied and added files from a working directory state
171 171 and returns the subset of each contained in given changed files
172 172 retrieved from a change context.'''
173 173 modified, added = wstatus[:2]
174 174 modified = [f for f in modified if f in changed]
175 175 added = [f for f in added if f in changed]
176 176 return modified, added
177 177
178 178
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        # matcher over the [keyword] include/exclude patterns
        self.match = match.match(repo.root, '', [], inc, exc)
        # restricted commands unexpand on read and expand only on write
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.record = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            # render the template for the matched keyword against ctx,
            # keeping only the first line of the result
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        # never expand in restricted mode, for unconfigured paths, or in
        # binary data
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion are not symbolic links.'''
        return [f for f in cand if self.match(f) and not 'l' in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.record: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        # the manifest is only needed when reading from the filelog or
        # when each file's changectx must be looked up below
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        lctx = ctx
        # match unexpanded keywords when restricted (or rekw is forced),
        # otherwise match already-expanded ones
        re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
        msg = (expand and _('overwriting %s expanding keywords\n')
               or _('overwriting %s shrinking keywords\n'))
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    lctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, lctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                self.repo.wwrite(f, data, ctx.flags(f))
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.record:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        return self.restrict and self.shrink(fname, data) or data
297 297
class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        # kwt: the active kwtemplater doing the expand/shrink work
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        # do not expand when the node records a rename
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, text)
324 324
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if not kwt:
        # no templater was set up: either nothing matched or nothing is
        # configured at all — abort with the appropriate message
        if ui.configitems('keyword'):
            raise util.Abort(_('[keyword] patterns cannot match'))
        raise util.Abort(_('no [keyword] patterns configured'))
    showunknown = opts.get('unknown') or opts.get('all')
    return repo.status(match=scmutil.match(repo, pats, opts), clean=True,
                       unknown=showunknown)
334 334
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        st = _status(ui, repo, kwt, *pats, **opts)
        # st is (modified, added, removed, deleted, unknown, ignored, clean);
        # any pending change would be clobbered by the overwrite below
        if st[0] or st[1] or st[2] or st[3]:
            raise util.Abort(_('outstanding uncommitted changes'))
        kwt.overwrite(wctx, st[6], True, expand)
    finally:
        wlock.release()
350 350
@command('kwdemo',
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '',
           _('read maps from rcfile'), _('FILE'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print one config section in hgrc syntax
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    # the demo runs against a scratch repository, removed at the end
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(ui, tmpdir, True)
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            # push the defaults into the config so they win over uikwmaps
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)

    # activate the extension in the scratch repo and print the effective
    # configuration
    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # write one unexpanded "$Keyword$" per line into the demo file
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    # disable any configured commit hooks before the demo commit
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg)
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)
438 438
@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to True; all the real work happens in
    # _kwfwrite/kwtemplater.overwrite
    _kwfwrite(ui, repo, True, *pats, **opts)
449 449
@command('kwfiles',
         [('A', 'all', None, _('show keyword status flags of all files')),
          ('i', 'ignore', None, _('show files excluded from expansion')),
          ('u', 'unknown', None, _('only show unknown (not tracked) files')),
         ] + commands.walkopts,
         _('hg kwfiles [OPTION]... [FILE]...'))
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = kwtools['templater']
    status = _status(ui, repo, kwt, *pats, **opts)
    cwd = pats and repo.getcwd() or ''
    modified, added, removed, deleted, unknown, ignored, clean = status
    files = []
    if not opts.get('unknown') or opts.get('all'):
        files = sorted(modified + added + clean)
    wctx = repo[None]
    # partition candidates into tracked, deleted and unknown kw files
    kwfiles = kwt.iskwfile(files, wctx)
    kwdeleted = kwt.iskwfile(deleted, wctx)
    kwunknown = kwt.iskwfile(unknown, wctx)
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwdeleted, kwunknown
    else:
        showfiles = [], [], []
    if opts.get('all') or opts.get('ignore'):
        # append the non-candidate (ignored) tracked and unknown files
        showfiles += ([f for f in files if f not in kwfiles],
                      [f for f in unknown if f not in kwunknown])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    # 'K!kIi' pairs one status char with each list in showfiles; zip
    # truncates when ignored files are not shown
    kwstates = zip('K!kIi', showfiles, kwlabels)
    for char, filenames, kwstate in kwstates:
        # status chars are only printed with -A/--all or --verbose
        fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
        for f in filenames:
            ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
501 501
@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to False; all the real work happens in
    # _kwfwrite/kwtemplater.overwrite
    _kwfwrite(ui, repo, False, *pats, **opts)
512 512
513 513
def uisetup(ui):
    ''' Monkeypatches dispatch._parse to retrieve user command.'''

    def kwdispatch_parse(orig, ui, args):
        '''Monkeypatch dispatch._parse to obtain running hg command.'''
        parsed = orig(ui, args)
        # parsed is (cmd, func, args, options, cmdoptions); remember the
        # command name so reposetup/kwtemplater can restrict expansion
        kwtools['hgcmd'] = parsed[0]
        return parsed

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
524 524
def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution.
    Overrides file method to return kwfilelog instead of filelog
    if file matches user configuration.
    Wraps commit to overwrite configured files with updated
    keyword substitutions.
    Monkeypatches patch and webcommands.'''

    try:
        # keyword expansion only applies to local working copies; skip
        # commands that never touch keywords, repos nested inside .hg,
        # and bundle repos
        if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
            or '.hg' in util.splitpath(repo.root)
            or repo._url.startswith('bundle:')):
            return
    except AttributeError:
        pass

    inc, exc = [], ['.hg*']
    for pat, opt in ui.configitems('keyword'):
        if opt != 'ignore':
            inc.append(pat)
        else:
            exc.append(pat)
    if not inc:
        # no inclusion patterns: keyword expansion is effectively disabled
        return

    kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            return kwfilelog(self.sopener, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                return super(kwrepo, self).commit(*args, **opts)
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False):
            n = super(kwrepo, self).commitctx(ctx, error)
            # no lock needed, only called from repo.commit() which already locks
            if not kwt.record:
                restrict = kwt.restrict
                kwt.restrict = True
                kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
                              False, True)
                kwt.restrict = restrict
            return n

        def rollback(self, dryrun=False):
            wlock = self.wlock()
            try:
                if not dryrun:
                    changed = self['.'].files()
                ret = super(kwrepo, self).rollback(dryrun)
                if not dryrun:
                    # re-expand in modified files, unexpand in files whose
                    # addition was rolled back
                    ctx = self['.']
                    modified, added = _preselect(self[None].status(), changed)
                    kwt.overwrite(ctx, modified, True, True)
                    kwt.overwrite(ctx, added, True, False)
                return ret
            finally:
                wlock.release()

    # monkeypatches
    # NOTE: the pasted diff carried both the old (opener) and new (backend)
    # signatures; the post-change `backend` variant is kept, matching the
    # "extract fs access from patchfile into fsbackend" changeset
    def kwpatchfile_init(orig, self, ui, fname, backend,
                         missing=False, eolmode=None):
        '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
        rejects or conflicts due to expanded keywords in working dir.'''
        orig(self, ui, fname, backend, missing, eolmode)
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)

    def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
                opts=None, prefix=''):
        '''Monkeypatch patch.diff to avoid expansion.'''
        kwt.restrict = True
        return orig(repo, node1, node2, match, changes, opts, prefix)

    def kwweb_skip(orig, web, req, tmpl):
        '''Wraps webcommands.x turning off keyword expansion.'''
        kwt.match = util.never
        return orig(web, req, tmpl)

    def kw_copy(orig, ui, repo, pats, opts, rename=False):
        '''Wraps cmdutil.copy so that copy/rename destinations do not
        contain expanded keywords.
        Note that the source of a regular file destination may also be a
        symlink:
        hg cp sym x -> x is symlink
        cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
        For the latter we have to follow the symlink to find out whether its
        target is configured for expansion and we therefore must unexpand the
        keywords in the destination.'''
        orig(ui, repo, pats, opts, rename)
        if opts.get('dry_run'):
            return
        wctx = repo[None]
        cwd = repo.getcwd()

        def haskwsource(dest):
            '''Returns true if dest is a regular file and configured for
            expansion or a symlink which points to a file configured for
            expansion. '''
            source = repo.dirstate.copied(dest)
            if 'l' in wctx.flags(source):
                source = scmutil.canonpath(repo.root, cwd,
                                           os.path.realpath(source))
            return kwt.match(source)

        candidates = [f for f in repo.dirstate.copies() if
                      not 'l' in wctx.flags(f) and haskwsource(f)]
        kwt.overwrite(wctx, candidates, False, False)

    def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
        '''Wraps record.dorecord expanding keywords after recording.'''
        wlock = repo.wlock()
        try:
            # record returns 0 even when nothing has changed
            # therefore compare nodes before and after
            kwt.record = True
            ctx = repo['.']
            wstatus = repo[None].status()
            ret = orig(ui, repo, commitfunc, *pats, **opts)
            recctx = repo['.']
            if ctx != recctx:
                modified, added = _preselect(wstatus, recctx.files())
                kwt.restrict = False
                kwt.overwrite(recctx, modified, False, True)
                kwt.overwrite(recctx, added, False, True, True)
                kwt.restrict = True
            return ret
        finally:
            wlock.release()

    def kwfilectx_cmp(orig, self, fctx):
        # keyword affects data size, comparing wdir and filelog size does
        # not make sense
        if (fctx._filerev is None and
            (self._repo._encodefilterpats or
             kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
            self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())
        return True

    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    extensions.wrapfunction(patch, 'diff', kw_diff)
    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
    for c in 'annotate changeset rev filediff diff'.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)
    for name in recordextensions.split():
        try:
            record = extensions.find(name)
            extensions.wrapfunction(record, 'dorecord', kw_dorecord)
        except KeyError:
            pass

    repo.__class__ = kwrepo
@@ -1,1697 +1,1743 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, errno, re
10 10 import tempfile, zlib
11 11
12 12 from i18n import _
13 13 from node import hex, nullid, short
14 14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15 15
16 16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 17
18 18 class PatchError(Exception):
19 19 pass
20 20
21 21 # helper functions
22 22
23 23 def copyfile(src, dst, basedir):
24 24 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
25 25 for x in [src, dst]]
26 26 if os.path.lexists(absdst):
27 27 raise util.Abort(_("cannot create %s: destination already exists") %
28 28 dst)
29 29
30 30 dstdir = os.path.dirname(absdst)
31 31 if dstdir and not os.path.isdir(dstdir):
32 32 try:
33 33 os.makedirs(dstdir)
34 34 except IOError:
35 35 raise util.Abort(
36 36 _("cannot create %s: unable to create destination directory")
37 37 % dst)
38 38
39 39 util.copyfile(abssrc, absdst)
40 40
41 41 # public functions
42 42
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # True when 'line' looks like an RFC 2822 header line ("Key: value",
        # or a continuation line while already inside a header block)
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # package accumulated lines as a file-like object
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers (hg export bundles)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on 'From ' separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email parser break a MIME message into its text parts
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new header block begins after non-header lines
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # fallback: everything read so far is a single plain patch
        yield chunk(cur)

    class fiter(object):
        # minimal iterator wrapper for file-likes that expose readline()
        # but not the iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not hasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # read ahead just far enough to pick a splitting strategy, then hand
    # the stream plus the buffered prefix to the chosen generator
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
169 169
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    # the patch body is accumulated into this temp file; the caller
    # unlinks it (see docstring)
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading '[PATCH n/m]' tag from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = subject.replace('\n\t', ' ')
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                # text before the diff start is the commit message,
                # possibly interleaved with 'hg export' header lines
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[10:])
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---' and gitsendmail:
                        # git-send-email separates message from diffstat
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    # NOTE(review): bare except is deliberate here — remove the temp
    # file on any failure, then re-raise unchanged
    except:
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None
    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2
280 280
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None   # origin path for COPY/RENAME
        self.mode = None      # (islink, isexec) once setmode() is called
        self.op = 'MODIFY'
        self.binary = False   # True when a 'GIT binary patch' follows

    def setmode(self, mode):
        # decode a git mode literal: 020000 is the symlink bit,
        # 0100 the owner-executable bit
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
305 305
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Accumulate one patchmeta per 'diff --git' header, filling it in
    # from the extended header lines that follow.
    gitpatches = []
    gp = None
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                gp = patchmeta(m.group(2))
        elif gp:
            if line.startswith('--- '):
                # hunks begin here: metadata for this file is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[len('rename from '):]
            elif line.startswith('rename to '):
                gp.path = line[len('rename to '):]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[len('copy from '):]
            elif line.startswith('copy to '):
                gp.path = line[len('copy to '):]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        # stream ended inside the last file's extended header
        gitpatches.append(gp)

    return gitpatches
349 349
class linereader(object):
    """Line reader with push-back support.

    Wraps a file-like object; lines queued with push() are returned by
    readline() before anything further is read from the stream.  In
    textmode, CRLF endings are normalized to LF; the EOL style first
    seen on the underlying stream is remembered in self.eol.
    """
    def __init__(self, fp, textmode=False):
        self.fp = fp
        self.buf = []            # pushed-back lines, returned FIFO
        self.textmode = textmode
        self.eol = None          # '\r\n' or '\n' once detected

    def push(self, line):
        # queue 'line' to be handed out by the next readline() call
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        line = self.fp.readline()
        if self.eol is None:
            if line.endswith('\r\n'):
                self.eol = '\r\n'
            elif line.endswith('\n'):
                self.eol = '\n'
        if self.textmode and line.endswith('\r\n'):
            line = line[:-2] + '\n'
        return line

    def __iter__(self):
        while True:
            line = self.readline()
            if not line:
                return
            yield line
383 383
384 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
385 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
386 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
387 eolmodes = ['strict', 'crlf', 'lf', 'auto']
388
389 class patchfile(object):
390 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
391 self.fname = fname
392 self.eolmode = eolmode
393 self.eol = None
394 self.opener = opener
384 class abstractbackend(object):
385 def __init__(self, ui):
395 386 self.ui = ui
396 self.lines = []
397 self.exists = False
398 self.missing = missing
399 if not missing:
400 try:
401 self.lines = self.readlines(fname)
402 self.exists = True
403 except IOError:
404 pass
405 else:
406 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
387
388 def readlines(self, fname):
389 """Return target file lines, or its content as a single line
390 for symlinks.
391 """
392 raise NotImplementedError
393
394 def writelines(self, fname, lines):
395 """Write lines to target file."""
396 raise NotImplementedError
407 397
408 self.hash = {}
409 self.dirty = False
410 self.offset = 0
411 self.skew = 0
412 self.rej = []
413 self.fileprinted = False
414 self.printfile(False)
415 self.hunks = 0
398 def unlink(self, fname):
399 """Unlink target file."""
400 raise NotImplementedError
401
402 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
405 files.
406 """
407 pass
408
409 class fsbackend(abstractbackend):
410 def __init__(self, ui, opener):
411 super(fsbackend, self).__init__(ui)
412 self.opener = opener
416 413
417 414 def readlines(self, fname):
418 415 if os.path.islink(fname):
419 416 return [os.readlink(fname)]
420 417 fp = self.opener(fname, 'r')
421 418 try:
422 lr = linereader(fp, self.eolmode != 'strict')
423 lines = list(lr)
424 self.eol = lr.eol
425 return lines
419 return list(fp)
426 420 finally:
427 421 fp.close()
428 422
429 423 def writelines(self, fname, lines):
430 424 # Ensure supplied data ends in fname, being a regular file or
431 425 # a symlink. _updatedir will -too magically- take care
432 426 # of setting it to the proper type afterwards.
433 427 st_mode = None
434 428 islink = os.path.islink(fname)
435 429 if islink:
436 430 fp = cStringIO.StringIO()
437 431 else:
438 432 try:
439 433 st_mode = os.lstat(fname).st_mode & 0777
440 434 except OSError, e:
441 435 if e.errno != errno.ENOENT:
442 436 raise
443 437 fp = self.opener(fname, 'w')
444 438 try:
445 if self.eolmode == 'auto':
446 eol = self.eol
447 elif self.eolmode == 'crlf':
448 eol = '\r\n'
449 else:
450 eol = '\n'
451
452 if self.eolmode != 'strict' and eol and eol != '\n':
453 for l in lines:
454 if l and l[-1] == '\n':
455 l = l[:-1] + eol
456 fp.write(l)
457 else:
458 fp.writelines(lines)
439 fp.writelines(lines)
459 440 if islink:
460 441 self.opener.symlink(fp.getvalue(), fname)
461 442 if st_mode is not None:
462 443 os.chmod(fname, st_mode)
463 444 finally:
464 445 fp.close()
465 446
466 447 def unlink(self, fname):
467 448 os.unlink(fname)
468 449
450 def writerej(self, fname, failed, total, lines):
451 fname = fname + ".rej"
452 self.ui.warn(
453 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
454 (failed, total, fname))
455 fp = self.opener(fname, 'w')
456 fp.writelines(lines)
457 fp.close()
458
459 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
460 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
461 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
462 eolmodes = ['strict', 'crlf', 'lf', 'auto']
463
464 class patchfile(object):
465 def __init__(self, ui, fname, backend, missing=False, eolmode='strict'):
466 self.fname = fname
467 self.eolmode = eolmode
468 self.eol = None
469 self.backend = backend
470 self.ui = ui
471 self.lines = []
472 self.exists = False
473 self.missing = missing
474 if not missing:
475 try:
476 self.lines = self.backend.readlines(fname)
477 if self.lines:
478 # Normalize line endings
479 if self.lines[0].endswith('\r\n'):
480 self.eol = '\r\n'
481 elif self.lines[0].endswith('\n'):
482 self.eol = '\n'
483 if eolmode != 'strict':
484 nlines = []
485 for l in self.lines:
486 if l.endswith('\r\n'):
487 l = l[:-2] + '\n'
488 nlines.append(l)
489 self.lines = nlines
490 self.exists = True
491 except IOError:
492 pass
493 else:
494 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
495
496 self.hash = {}
497 self.dirty = 0
498 self.offset = 0
499 self.skew = 0
500 self.rej = []
501 self.fileprinted = False
502 self.printfile(False)
503 self.hunks = 0
504
505 def writelines(self, fname, lines):
506 if self.eolmode == 'auto':
507 eol = self.eol
508 elif self.eolmode == 'crlf':
509 eol = '\r\n'
510 else:
511 eol = '\n'
512
513 if self.eolmode != 'strict' and eol and eol != '\n':
514 rawlines = []
515 for l in lines:
516 if l and l[-1] == '\n':
517 l = l[:-1] + eol
518 rawlines.append(l)
519 lines = rawlines
520
521 self.backend.writelines(fname, lines)
522
469 523 def printfile(self, warn):
470 524 if self.fileprinted:
471 525 return
472 526 if warn or self.ui.verbose:
473 527 self.fileprinted = True
474 528 s = _("patching file %s\n") % self.fname
475 529 if warn:
476 530 self.ui.warn(s)
477 531 else:
478 532 self.ui.note(s)
479 533
480 534
481 535 def findlines(self, l, linenum):
482 536 # looks through the hash and finds candidate lines. The
483 537 # result is a list of line numbers sorted based on distance
484 538 # from linenum
485 539
486 540 cand = self.hash.get(l, [])
487 541 if len(cand) > 1:
488 542 # resort our list of potentials forward then back.
489 543 cand.sort(key=lambda x: abs(x - linenum))
490 544 return cand
491 545
492 546 def makerejlines(self, fname):
493 547 base = os.path.basename(fname)
494 548 yield "--- %s\n+++ %s\n" % (base, base)
495 549 for x in self.rej:
496 550 for l in x.hunk:
497 551 yield l
498 552 if l[-1] != '\n':
499 553 yield "\n\ No newline at end of file\n"
500 554
501 555 def write_rej(self):
502 556 # our rejects are a little different from patch(1). This always
503 557 # creates rejects in the same form as the original patch. A file
504 558 # header is inserted so that you can run the reject through patch again
505 559 # without having to type the filename.
506
507 560 if not self.rej:
508 561 return
509
510 fname = self.fname + ".rej"
511 self.ui.warn(
512 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
513 (len(self.rej), self.hunks, fname))
514
515 fp = self.opener(fname, 'w')
516 fp.writelines(self.makerejlines(self.fname))
517 fp.close()
562 self.backend.writerej(self.fname, len(self.rej), self.hunks,
563 self.makerejlines(self.fname))
518 564
519 565 def apply(self, h):
520 566 if not h.complete():
521 567 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
522 568 (h.number, h.desc, len(h.a), h.lena, len(h.b),
523 569 h.lenb))
524 570
525 571 self.hunks += 1
526 572
527 573 if self.missing:
528 574 self.rej.append(h)
529 575 return -1
530 576
531 577 if self.exists and h.createfile():
532 578 self.ui.warn(_("file %s already exists\n") % self.fname)
533 579 self.rej.append(h)
534 580 return -1
535 581
536 582 if isinstance(h, binhunk):
537 583 if h.rmfile():
538 self.unlink(self.fname)
584 self.backend.unlink(self.fname)
539 585 else:
540 586 self.lines[:] = h.new()
541 587 self.offset += len(h.new())
542 588 self.dirty = True
543 589 return 0
544 590
545 591 horig = h
546 592 if (self.eolmode in ('crlf', 'lf')
547 593 or self.eolmode == 'auto' and self.eol):
548 594 # If new eols are going to be normalized, then normalize
549 595 # hunk data before patching. Otherwise, preserve input
550 596 # line-endings.
551 597 h = h.getnormalized()
552 598
553 599 # fast case first, no offsets, no fuzz
554 600 old = h.old()
555 601 # patch starts counting at 1 unless we are adding the file
556 602 if h.starta == 0:
557 603 start = 0
558 604 else:
559 605 start = h.starta + self.offset - 1
560 606 orig_start = start
561 607 # if there's skew we want to emit the "(offset %d lines)" even
562 608 # when the hunk cleanly applies at start + skew, so skip the
563 609 # fast case code
564 610 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
565 611 if h.rmfile():
566 self.unlink(self.fname)
612 self.backend.unlink(self.fname)
567 613 else:
568 614 self.lines[start : start + h.lena] = h.new()
569 615 self.offset += h.lenb - h.lena
570 616 self.dirty = True
571 617 return 0
572 618
573 619 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
574 620 self.hash = {}
575 621 for x, s in enumerate(self.lines):
576 622 self.hash.setdefault(s, []).append(x)
577 623 if h.hunk[-1][0] != ' ':
578 624 # if the hunk tried to put something at the bottom of the file
579 625 # override the start line and use eof here
580 626 search_start = len(self.lines)
581 627 else:
582 628 search_start = orig_start + self.skew
583 629
584 630 for fuzzlen in xrange(3):
585 631 for toponly in [True, False]:
586 632 old = h.old(fuzzlen, toponly)
587 633
588 634 cand = self.findlines(old[0][1:], search_start)
589 635 for l in cand:
590 636 if diffhelpers.testhunk(old, self.lines, l) == 0:
591 637 newlines = h.new(fuzzlen, toponly)
592 638 self.lines[l : l + len(old)] = newlines
593 639 self.offset += len(newlines) - len(old)
594 640 self.skew = l - orig_start
595 641 self.dirty = True
596 642 offset = l - orig_start - fuzzlen
597 643 if fuzzlen:
598 644 msg = _("Hunk #%d succeeded at %d "
599 645 "with fuzz %d "
600 646 "(offset %d lines).\n")
601 647 self.printfile(True)
602 648 self.ui.warn(msg %
603 649 (h.number, l + 1, fuzzlen, offset))
604 650 else:
605 651 msg = _("Hunk #%d succeeded at %d "
606 652 "(offset %d lines).\n")
607 653 self.ui.note(msg % (h.number, l + 1, offset))
608 654 return fuzzlen
609 655 self.printfile(True)
610 656 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
611 657 self.rej.append(horig)
612 658 return -1
613 659
614 660 def close(self):
615 661 if self.dirty:
616 662 self.writelines(self.fname, self.lines)
617 663 self.write_rej()
618 664 return len(self.rej)
619 665
class hunk(object):
    """One hunk of a text patch.

    Holds the hunk header in 'desc', the raw hunk lines in 'hunk', the
    old-side lines in 'a' (with '-'/' ' prefixes) and the new-side
    lines in 'b' (unprefixed), plus the start/length fields parsed
    from the header.  Context-diff hunks are converted to unified form
    while being read.
    """
    def __init__(self, desc, num, lr, context, create=False, remove=False):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None, False, False)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        nh.create = self.create
        nh.remove = self.remove
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from linereader lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        # lengths are implicitly 1 when omitted from the @@ header
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk and convert it to unified form."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # read the new-side block, merging it into self.hunk in order
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # position the new line relative to the already-recorded
            # old-side lines in self.hunk
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # handle a trailing '\ No newline at end of file' marker
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides match the lengths the header promised."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def createfile(self):
        """True when this hunk creates its target file."""
        return self.starta == 0 and self.lena == 0 and self.create

    def rmfile(self):
        """True when this hunk removes its target file."""
        return self.startb == 0 and self.lenb == 0 and self.remove

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        """Old-side lines, optionally with fuzzed-away context."""
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        """New-side lines, optionally with fuzzed-away context."""
        return self.fuzzit(self.b, fuzz, toponly)
850 896
class binhunk:
    """A binary patch file. Only understands literals so far.

    Parses a 'GIT binary patch' section: a 'literal <size>' header
    followed by base85-encoded, zlib-compressed content.
    """
    def __init__(self, gitpatch):
        self.gitpatch = gitpatch
        self.text = None
        self.hunk = ['GIT binary patch\n']

    def createfile(self):
        # a binary hunk creates its target for add/rename/copy operations
        return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')

    def rmfile(self):
        return self.gitpatch.op == 'DELETE'

    def complete(self):
        return self.text is not None

    def new(self):
        return [self.text]

    def extract(self, lr):
        """Read and decode the binary hunk body from linereader lr.

        Raises PatchError when no 'literal' header is found or the
        decompressed size does not match the declared one.
        """
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            # each data line starts with a length character:
            # 'A'-'Z' encode 1-26 payload bytes, 'a'-'z' encode 27-52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # %-format both values as a tuple; the original applied '%'
            # to len(text) alone and passed 'size' as a second PatchError
            # argument, so this error path itself raised
            # "TypeError: not enough arguments for format string".
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
896 942
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' patch header line.

    The line looks like '--- filename<tab or space>extra stuff'; the
    four-character prefix is dropped and everything from the first tab
    (or, failing that, the first space) onward is discarded.
    """
    name = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
906 952
def pathstrip(path, strip):
    """Strip the first 'strip' slash-separated components from path.

    Returns a (stripped-prefix, remainder) pair; runs of consecutive
    slashes count as a single separator.  Raises PatchError when the
    path has fewer than 'strip' components.
    """
    if strip == 0:
        return '', path.rstrip()
    pathlen = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < pathlen - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
924 970
def selectfile(afile_orig, bfile_orig, hunk, strip):
    """Pick which on-disk file a hunk applies to.

    Strips 'strip' leading components from both diff file names, then
    chooses between them based on which one exists in the working
    directory and whether the hunk creates its target.  Returns a
    (fname, missing) pair; 'missing' is True when neither side exists
    and the hunk does not create the file.  Raises PatchError when
    both sides are /dev/null.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and os.path.lexists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and os.path.lexists(bfile)
    createfunc = hunk.createfile
    missing = not goodb and not gooda and not createfunc()

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if missing and abasedir == bbasedir and afile.startswith(bfile):
        # this isn't very pretty
        # tentatively mark the hunk as creating; undo if that still
        # doesn't make it a file-creation hunk
        hunk.create = True
        if createfunc():
            missing = False
        else:
            hunk.create = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, missing
970 1016
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # remember where we are so the caller can keep reading from here
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable stream (e.g. a pipe): buffer the rest in memory
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp, lr.textmode)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the main parser sees the patch from the same point again
    fp.seek(pos)
    return gitpatches
996 1042
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    # git patch records seen so far, keyed by path
    changed = {}
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    # emitfile: a 'file' event is pending; newfile: a header was just parsed
    emitfile = newfile = False
    git = False

    # our states
    BFILE = 1
    # context is None until we know whether this is a context or unified diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if (state == BFILE and ((not context and x[0] == '@') or
            ((context is not False) and x.startswith('***************')))):
            if context is None and x.startswith('***************'):
                context = True
            gpatch = changed.get(bfile)
            # git metadata, when present, overrides the /dev/null convention
            create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
            remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
            h = hunk(x, hunknum + 1, lr, context, create, remove)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h)
            yield 'hunk', h
        elif state == BFILE and x.startswith('GIT binary patch'):
            h = binhunk(changed[bfile])
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', ('a/' + afile, 'b/' + bfile, h)
            h.extract(lr)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            # check for git diff, scanning the whole patch file if needed
            m = gitre.match(x)
            if m:
                afile, bfile = m.group(1, 2)
                if not git:
                    git = True
                    gitpatches = scangitpatch(lr, x)
                    yield 'git', gitpatches
                    for gp in gitpatches:
                        changed[gp.path] = gp
                # else error?
                # copy/rename + modify should modify target, not source
                gp = changed.get(bfile)
                if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
                           or gp.mode):
                    afile = bfile
                newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0
1092 1138
def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    The dict 'changed' is filled in with all of the filenames changed
    by the patch. Returns 0 for a clean patch, -1 if any rejects were
    found and 1 if there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.

    Callers probably want to call '_updatedir' after this to
    apply certain categories of changes not done by this function.
    """
    # delegate with the default file patcher and copy helper
    return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
                      eolmode=eolmode)
1109 1155
def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
    """Apply the hunks parsed from fp using 'patcher' for file updates
    and 'copyfn' for git copies/renames.

    Returns 0 on success, 1 if any hunk was applied with fuzz, -1 if
    any hunk was rejected.  'changed' is filled with touched filenames.
    """
    rejects = 0
    err = 0
    current_file = None
    cwd = os.getcwd()
    # all filesystem access goes through the backend abstraction
    backend = fsbackend(ui, scmutil.opener(cwd))

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret >= 0:
                changed.setdefault(current_file.fname, None)
                if ret > 0:
                    err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            afile, bfile, first_hunk = values
            try:
                current_file, missing = selectfile(afile, bfile,
                                                   first_hunk, strip)
                current_file = patcher(ui, current_file, backend,
                                       missing=missing, eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                gp.path = pathstrip(gp.path, strip - 1)[1]
                if gp.oldpath:
                    gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                # Binary patches really overwrite target files, copying them
                # will just make it fails with "target file exists"
                if gp.op in ('COPY', 'RENAME') and not gp.binary:
                    copyfn(gp.oldpath, gp.path, cwd)
                changed[gp.path] = gp
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1159 1205
def _updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    # 'patches' maps filename -> gitpatch record (or None for plain hunks)
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        # translate repo-relative names to cwd-relative for addremove
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        scmutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    # with similarity, removals are left for addremove to pair up as renames
    if (not similarity) and removes:
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            util.setflags(dst, islink, isexec)
    scmutil.addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
1202 1248
def _externalpatch(patcher, patchname, ui, strip, cwd, files):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))

    # scrape the external tool's output for per-file status
    for line in fp:
        line = line.rstrip()
        ui.note(line + '\n')
        if line.startswith('patching file '):
            pf = util.parsepatchoutput(line)
            printed_file = False
            files.setdefault(pf, None)
        elif line.find('with fuzz') >= 0:
            fuzz = True
            # NOTE(review): 'pf'/'printed_file' are only bound once a
            # 'patching file' line was seen — assumes the external patch
            # tool always prints one first; verify for exotic patchers.
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
        elif line.find('saving rejects to file') >= 0:
            ui.warn(line + '\n')
        elif line.find('FAILED') >= 0:
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1239 1285
def internalpatch(ui, repo, patchobj, strip, cwd, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""

    if files is None:
        files = {}
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    # patchobj may be a path or an already-open file-like object
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    if cwd:
        curdir = os.getcwd()
        os.chdir(cwd)
    try:
        ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
    finally:
        # always restore the working directory and close what we opened
        if cwd:
            os.chdir(curdir)
        if fp != patchobj:
            fp.close()
    touched = _updatedir(ui, repo, files, similarity)
    files.update(dict.fromkeys(touched))
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1272 1318
def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = {}
    try:
        if patcher:
            try:
                return _externalpatch(patcher, patchname, ui, strip, cwd,
                                      files)
            finally:
                # update the dirstate even when the external tool failed
                touched = _updatedir(ui, repo, files, similarity)
                files.update(dict.fromkeys(touched))
        return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
                             similarity)
    except PatchError, err:
        raise util.Abort(str(err))
1301 1347
def changedfiles(patchpath, strip=1):
    """Return the set of file paths touched by the patch at 'patchpath'
    without applying it."""
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'hunk':
                continue
            elif state == 'file':
                afile, bfile, first_hunk = values
                current_file, missing = selectfile(afile, bfile,
                                                   first_hunk, strip)
                changed.add(current_file)
            elif state == 'git':
                for gp in values:
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    changed.add(gp.path)
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                        # a rename touches its source as well
                        if gp.op == 'RENAME':
                            changed.add(gp.oldpath)
            else:
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1327 1373
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # SHA-1 of a git blob object: "blob <len>\0<content>"
        if not text:
            return hex(nullid)
        sha = util.sha1('blob %d\0' % len(text))
        sha.update(text)
        return sha.hexdigest()

    def fmtline(line):
        # one leading letter encodes the chunk length: A-Z for 1-26,
        # a-z for longer chunks
        n = len(line)
        if n <= 26:
            code = chr(ord('A') + n - 1)
        else:
            code = chr(n - 26 + ord('a') - 1)
        return '%c%s\n' % (code, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # slice the payload into fixed-size pieces
        for start in range(0, len(text), csize):
            yield text[start:start + csize]

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        return ""

    # TODO: deltas
    pieces = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
              (tohash, tnhash, len(tn))]
    for piece in chunk(zlib.compress(tn)):
        pieces.append(fmtline(piece))
    pieces.append('\n')
    return ''.join(pieces)
1365 1411
class GitDiffRequired(Exception):
    """Raised when a change can only be represented in git patch format."""
    pass
1368 1414
def diffopts(ui, opts=None, untrusted=False):
    """Build mdiff.diffopts from command options and [diff] config.

    Command-line 'opts' take precedence over configuration values.
    """
    def get(key, name=None, getter=ui.configbool):
        return ((opts and opts.get(key)) or
                getter('diff', name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        # 'unified' is an integer, so read it with ui.config
        context=get('unified', getter=ui.config))
1382 1428
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs to avoid re-opening them per file
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # retry in git format once plain format proved lossy
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1457 1503
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefix -> label, checked in order; longest/most specific first
    prefixes = [('diff', 'diff.diffline'),
                ('copy', 'diff.extended'),
                ('rename', 'diff.extended'),
                ('old', 'diff.extended'),
                ('new', 'diff.extended'),
                ('deleted', 'diff.extended'),
                ('---', 'diff.file_a'),
                ('+++', 'diff.file_b'),
                ('@@', 'diff.hunk'),
                ('-', 'diff.deleted'),
                ('+', 'diff.inserted')]

    for chunk in func(*args, **kw):
        for idx, line in enumerate(chunk.split('\n')):
            if idx:
                # re-emit the newline the split consumed, unlabelled
                yield ('\n', '')
            trimmed = line
            if line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                trimmed = line.rstrip()
            for prefix, label in prefixes:
                if trimmed.startswith(prefix):
                    yield (trimmed, label)
                    break
            else:
                yield (line, '')
            if line != trimmed:
                yield (line[len(trimmed):], 'diff.trailingwhitespace')
1489 1535
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # same arguments as diff(); labelling is layered on by difflabel
    return difflabel(diff, *args, **kw)
1493 1539
1494 1540
1495 1541 def _addmodehdr(header, omode, nmode):
1496 1542 if omode != nmode:
1497 1543 header.append('old mode %s\n' % omode)
1498 1544 header.append('new mode %s\n' % nmode)
1499 1545
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    """Yield diff text (headers and hunks) for each changed file.

    Calls losedatafn(f) whenever a change cannot be represented in the
    plain (non-git) format; see diff() for the caller protocol.
    """

    def join(f):
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    # sources already reported as renamed, to avoid duplicate headers
    gone = set()
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse mapping: destination -> source
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        revs = None

    for f in sorted(modified + added + removed):
        to = None
        tn = None
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        # first copy whose source disappeared is a rename
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # plain modification
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)
        if opts.git:
            header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), revs, opts=opts)
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1603 1649
def diffstatdata(lines):
    """Yield (filename, adds, removes, isbinary) for each file in a diff."""
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    fname = None
    added = removed = 0
    for line in lines:
        if line.startswith('diff'):
            if fname:
                # a file with no +/- lines at all is reported as binary
                yield (fname, added, removed, added == 0 and removed == 0)
            # reset counters for the file that starts here
            added = removed = 0
            if line.startswith('diff --git'):
                fname = gitre.search(line).group(1)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++'):
            added += 1
        elif line.startswith('-') and not line.startswith('---'):
            removed += 1
    if fname:
        yield (fname, added, removed, added == 0 and removed == 0)
1627 1673
def diffstat(lines, width=80, git=False):
    """Render a diffstat summary of 'lines', fitted to 'width' columns.

    With git=True, binary changes are shown as 'Bin' instead of a count.
    """
    output = []
    stats = list(diffstatdata(lines))

    maxtotal, maxname = 0, 0
    totaladds, totalremoves = 0, 0
    hasbinary = False

    # precompute display widths (may differ from len() for wide chars)
    sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
             for filename, adds, removes, isbinary in stats]

    for filename, adds, removes, isbinary, namewidth in sized:
        totaladds += adds
        totalremoves += removes
        maxname = max(maxname, namewidth)
        maxtotal = max(maxtotal, adds + removes)
        if isbinary:
            hasbinary = True

    countwidth = len(str(maxtotal))
    # 'Bin' needs at least three columns
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary, namewidth in sized:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - namewidth),
                       countwidth, count,
                       pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1679 1725
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        # graph lines end with '+' or '-'; everything else is plain text
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            plus = re.search(r'\++', graph)
            if plus:
                yield (plus.group(0), 'diffstat.inserted')
            minus = re.search(r'-+', graph)
            if minus:
                yield (minus.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now