##// END OF EJS Templates
patch: set desired mode when patching, not in updatedir()...
Patrick Mezard -
r14367:468d7d17 default
parent child Browse files
Show More
@@ -1,691 +1,691
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a DSCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56 The more specific you are in your filename patterns the less you
57 57 lose speed in huge repositories.
58 58
59 59 For [keywordmaps] template mapping and expansion demonstration and
60 60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 61 available templates and filters.
62 62
63 63 Three additional date template filters are provided:
64 64
65 65 :``utcdate``: "2006/09/18 15:13:13"
66 66 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68 68
69 69 The default template mappings (view with :hg:`kwdemo -d`) can be
70 70 replaced with customized keywords and templates. Again, run
71 71 :hg:`kwdemo` to control the results of your configuration changes.
72 72
73 73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 74 to avoid storing expanded keywords in the change history.
75 75
76 76 To force expansion after enabling it, or a configuration change, run
77 77 :hg:`kwexpand`.
78 78
79 79 Expansions spanning more than one line and incremental expansions,
80 80 like CVS' $Log$, are not supported. A keyword template map "Log =
81 81 {desc}" expands to the first line of the changeset description.
82 82 '''
83 83
84 84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
85 85 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 86 from mercurial import scmutil
87 87 from mercurial.hgweb import webcommands
88 88 from mercurial.i18n import _
89 89 import os, re, shutil, tempfile
90 90
# kwdemo creates its own throwaway repository, so a repo is optional
commands.optionalrepo += ' kwdemo'

cmdtable = {}
command = cmdutil.command(cmdtable)

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'

# names of extensions using dorecord
recordextensions = 'record'

# color effects for the status characters printed by the kwfiles command
colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}
114 114
115 115 # date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    # zero out the stored timezone offset so the stamp renders as UTC
    when = (text[0], 0)
    return util.datestr(when, '%Y/%m/%d %H:%M:%S')
120 120 # date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    # keep the original timezone; %1%2 renders the stored offset
    fmt = '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)'
    return util.datestr(text, fmt)
126 126 # date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    # discard the timezone offset to force UTC, like svn's $Id$ dates
    utcstamp = (text[0], 0)
    return util.datestr(utcstamp, '%Y-%m-%d %H:%M:%SZ')
132 132
# register the three date filters with the template engine so keyword
# templates (and "hg help templates") can use them
templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible
# 'templater' holds the kwtemplater instance set up in reposetup();
# 'hgcmd' is the running command name captured in uisetup()
kwtools = {'templater': None, 'hgcmd': ''}
139 139
140 140 def _defaultkwmaps(ui):
141 141 '''Returns default keywordmaps according to keywordset configuration.'''
142 142 templates = {
143 143 'Revision': '{node|short}',
144 144 'Author': '{author|user}',
145 145 }
146 146 kwsets = ({
147 147 'Date': '{date|utcdate}',
148 148 'RCSfile': '{file|basename},v',
149 149 'RCSFile': '{file|basename},v', # kept for backwards compatibility
150 150 # with hg-keyword
151 151 'Source': '{root}/{file},v',
152 152 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
153 153 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
154 154 }, {
155 155 'Date': '{date|svnisodate}',
156 156 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
157 157 'LastChangedRevision': '{node|short}',
158 158 'LastChangedBy': '{author|user}',
159 159 'LastChangedDate': '{date|svnisodate}',
160 160 })
161 161 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
162 162 return templates
163 163
164 164 def _shrinktext(text, subfunc):
165 165 '''Helper for keyword expansion removal in text.
166 166 Depending on subfunc also returns number of substitutions.'''
167 167 return subfunc(r'$\1$', text)
168 168
169 169 def _preselect(wstatus, changed):
170 170 '''Retrieves modfied and added files from a working directory state
171 171 and returns the subset of each contained in given changed files
172 172 retrieved from a change context.'''
173 173 modified, added = wstatus[:2]
174 174 modified = [f for f in modified if f in changed]
175 175 added = [f for f in added if f in changed]
176 176 return modified, added
177 177
178 178
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        # matcher built from [keyword] include/exclude patterns
        self.match = match.match(repo.root, '', [], inc, exc)
        # True while running a command from 'restricted' (merge, record...)
        self.restrict = kwtools['hgcmd'] in restricted.split()
        # toggled by the record wrapper in reposetup()
        self.record = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            # render the template configured for the matched keyword
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            # expansions never span lines; keep only the first one
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        # never touch binary data or files outside the configured patterns
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion are not symbolic links.'''
        return [f for f in cand if self.match(f) and not 'l' in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.record: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        lctx = ctx
        # restricted mode (or explicit rekw) matches unexpanded keywords;
        # otherwise operate on already-expanded ones
        re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
        msg = (expand and _('overwriting %s expanding keywords\n')
               or _('overwriting %s shrinking keywords\n'))
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    lctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, lctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                self.repo.wwrite(f, data, ctx.flags(f))
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.record:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        return self.restrict and self.shrink(fname, data) or data
297 297
class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt    # shared kwtemplater instance from reposetup()
        self.path = path  # repo-relative path used for matching/expansion

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        # rename metadata must pass through untouched
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        # shrink both sides to the stored (unexpanded) form
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, text)
324 324
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if not kwt:
        # no active templater: configuration is missing or never matched
        if ui.configitems('keyword'):
            raise util.Abort(_('[keyword] patterns cannot match'))
        raise util.Abort(_('no [keyword] patterns configured'))
    showunknown = opts.get('unknown') or opts.get('all')
    return repo.status(match=scmutil.match(repo, pats, opts), clean=True,
                       unknown=showunknown)
334 334
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    # refuse to rewrite files in the middle of a merge
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        status = _status(ui, repo, kwt, *pats, **opts)
        modified, added, removed, deleted, unknown, ignored, clean = status
        # only clean files may be overwritten; anything pending aborts
        if modified or added or removed or deleted:
            raise util.Abort(_('outstanding uncommitted changes'))
        kwt.overwrite(wctx, clean, True, expand)
    finally:
        wlock.release()
350 350
@command('kwdemo',
    [('d', 'default', None, _('show default keyword template maps')),
     ('f', 'rcfile', '',
      _('read maps from rcfile'), _('FILE'))],
    _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print a config section in hgrc syntax
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    # the demo runs in a throwaway repository, removed at the end
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(ui, tmpdir, True)
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)

    # activate the extension in the demo repo and show its configuration
    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # one unexpanded $Keyword$ per line, committed and then shown expanded
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    # disable any configured commit hooks so the demo commit runs clean
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg)
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)
438 438
@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to True
    _kwfwrite(ui, repo, True, *pats, **opts)
449 449
@command('kwfiles',
    [('A', 'all', None, _('show keyword status flags of all files')),
     ('i', 'ignore', None, _('show files excluded from expansion')),
     ('u', 'unknown', None, _('only show unknown (not tracked) files')),
    ] + commands.walkopts,
    _('hg kwfiles [OPTION]... [FILE]...'))
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = kwtools['templater']
    status = _status(ui, repo, kwt, *pats, **opts)
    cwd = pats and repo.getcwd() or ''
    modified, added, removed, deleted, unknown, ignored, clean = status
    files = []
    if not opts.get('unknown') or opts.get('all'):
        files = sorted(modified + added + clean)
    wctx = repo[None]
    kwfiles = kwt.iskwfile(files, wctx)
    kwdeleted = kwt.iskwfile(deleted, wctx)
    kwunknown = kwt.iskwfile(unknown, wctx)
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwdeleted, kwunknown
    else:
        showfiles = [], [], []
    if opts.get('all') or opts.get('ignore'):
        # append non-candidates so the I/i status chars get their lists
        showfiles += ([f for f in files if f not in kwfiles],
                      [f for f in unknown if f not in kwunknown])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    # zip truncates to the shortest sequence, so missing lists are skipped
    kwstates = zip('K!kIi', showfiles, kwlabels)
    for char, filenames, kwstate in kwstates:
        # status char is shown only with --all or -v
        fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
        for f in filenames:
            ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
501 501
@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to False
    _kwfwrite(ui, repo, False, *pats, **opts)
512 512
513 513
def uisetup(ui):
    ''' Monkeypatches dispatch._parse to retrieve user command.'''

    def kwdispatch_parse(orig, ui, args):
        '''Monkeypatch dispatch._parse to obtain running hg command.'''
        # remember the command name so reposetup/kwtemplater can decide
        # whether to restrict or skip expansion
        parsed = orig(ui, args)
        kwtools['hgcmd'] = parsed[0]
        return parsed

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
524 524
def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution.
    Overrides file method to return kwfilelog instead of filelog
    if file matches user configuration.
    Wraps commit to overwrite configured files with updated
    keyword substitutions.
    Monkeypatches patch and webcommands.'''

    try:
        # skip non-local repos, bundle repos, repos nested inside .hg,
        # and commands that never act on keywords
        if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
            or '.hg' in util.splitpath(repo.root)
            or repo._url.startswith('bundle:')):
            return
    except AttributeError:
        pass

    # split [keyword] patterns into include and explicit 'ignore' lists
    inc, exc = [], ['.hg*']
    for pat, opt in ui.configitems('keyword'):
        if opt != 'ignore':
            inc.append(pat)
        else:
            exc.append(pat)
    if not inc:
        # nothing configured for expansion: leave the repo untouched
        return

    kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            return kwfilelog(self.sopener, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                return super(kwrepo, self).commit(*args, **opts)
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False):
            n = super(kwrepo, self).commitctx(ctx, error)
            # no lock needed, only called from repo.commit() which already locks
            if not kwt.record:
                restrict = kwt.restrict
                kwt.restrict = True
                kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
                              False, True)
                kwt.restrict = restrict
            return n

        def rollback(self, dryrun=False):
            wlock = self.wlock()
            try:
                if not dryrun:
                    # files of the changeset about to be rolled back
                    changed = self['.'].files()
                ret = super(kwrepo, self).rollback(dryrun)
                if not dryrun:
                    ctx = self['.']
                    modified, added = _preselect(self[None].status(), changed)
                    # re-expand modified files, shrink files that became added
                    kwt.overwrite(ctx, modified, True, True)
                    kwt.overwrite(ctx, added, True, False)
                return ret
            finally:
                wlock.release()

    # monkeypatches
    def kwpatchfile_init(orig, self, ui, fname, backend, mode,
                         missing=False, eolmode=None):
        '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
        rejects or conflicts due to expanded keywords in working dir.'''
        orig(self, ui, fname, backend, mode, missing, eolmode)
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)

    def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
                opts=None, prefix=''):
        '''Monkeypatch patch.diff to avoid expansion.'''
        kwt.restrict = True
        return orig(repo, node1, node2, match, changes, opts, prefix)

    def kwweb_skip(orig, web, req, tmpl):
        '''Wraps webcommands.x turning off keyword expansion.'''
        # util.never matches nothing, so expansion is disabled entirely
        kwt.match = util.never
        return orig(web, req, tmpl)

    def kw_copy(orig, ui, repo, pats, opts, rename=False):
        '''Wraps cmdutil.copy so that copy/rename destinations do not
        contain expanded keywords.
        Note that the source of a regular file destination may also be a
        symlink:
        hg cp sym x -> x is symlink
        cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
        For the latter we have to follow the symlink to find out whether its
        target is configured for expansion and we therefore must unexpand the
        keywords in the destination.'''
        orig(ui, repo, pats, opts, rename)
        if opts.get('dry_run'):
            return
        wctx = repo[None]
        cwd = repo.getcwd()

        def haskwsource(dest):
            '''Returns true if dest is a regular file and configured for
            expansion or a symlink which points to a file configured for
            expansion. '''
            source = repo.dirstate.copied(dest)
            if 'l' in wctx.flags(source):
                # resolve the symlink to its real, repo-relative target
                source = scmutil.canonpath(repo.root, cwd,
                                           os.path.realpath(source))
            return kwt.match(source)

        candidates = [f for f in repo.dirstate.copies() if
                      not 'l' in wctx.flags(f) and haskwsource(f)]
        # shrink keywords copied into the destinations
        kwt.overwrite(wctx, candidates, False, False)

    def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
        '''Wraps record.dorecord expanding keywords after recording.'''
        wlock = repo.wlock()
        try:
            # record returns 0 even when nothing has changed
            # therefore compare nodes before and after
            kwt.record = True
            ctx = repo['.']
            wstatus = repo[None].status()
            ret = orig(ui, repo, commitfunc, *pats, **opts)
            recctx = repo['.']
            if ctx != recctx:
                modified, added = _preselect(wstatus, recctx.files())
                kwt.restrict = False
                kwt.overwrite(recctx, modified, False, True)
                # rekw=True: added files still contain unexpanded keywords
                kwt.overwrite(recctx, added, False, True, True)
                kwt.restrict = True
            return ret
        finally:
            wlock.release()

    def kwfilectx_cmp(orig, self, fctx):
        # keyword affects data size, comparing wdir and filelog size does
        # not make sense
        if (fctx._filerev is None and
            (self._repo._encodefilterpats or
             kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
            self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())
        return True

    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    extensions.wrapfunction(patch, 'diff', kw_diff)
    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
    for c in 'annotate changeset rev filediff diff'.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)
    for name in recordextensions.split():
        try:
            record = extensions.find(name)
            extensions.wrapfunction(record, 'dorecord', kw_dorecord)
        except KeyError:
            # extension not enabled; nothing to wrap
            pass

    repo.__class__ = kwrepo
@@ -1,1749 +1,1757
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, errno, re
10 10 import tempfile, zlib
11 11
12 12 from i18n import _
13 13 from node import hex, nullid, short
14 14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15 15
16 16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 17
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
20 20
21 21
22 22 # public functions
23 23
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # True if line looks like an RFC-2822 header ("Key: value"),
        # or a continuation line while already inside a header.
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # materialize accumulated lines as a file-like object
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on "From " separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email package walk MIME parts, yielding textual ones
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new header block starts
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # single, plain patch
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object to the (Python 2) iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not hasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the first lines to decide which splitting strategy applies
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
150 150
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            # strip any "[PATCH n/m]" prefix from the subject line
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = subject.replace('\n\t', ' ')
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                # everything before the diff start is commit message and
                # (possibly) "# HG changeset patch" metadata
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[10:])
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---' and gitsendmail:
                        # git-send-email puts a '---' separator before the
                        # diffstat; everything after it is not commit message
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except:
        # deliberate bare except: clean up the temp file, then re-raise
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None
    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2
261 261
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY.  'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Decode a git-style octal file mode: 020000 is the symlink bit,
        # 0100 the owner-executable bit (Python 2 octal literals).
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
286 286
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Returns a list of patchmeta objects, one per "diff --git" section
    read from the line reader 'lr'.
    """

    # Filter patch for git information
    gp = None  # patchmeta currently being filled in
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # start of the hunk data: metadata section is over
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        # flush the last entry
        gitpatches.append(gp)

    return gitpatches
330 330
class linereader(object):
    """File-like reader that supports pushing lines back into the stream.

    Also records the first end-of-line style seen ('\\r\\n' or '\\n') in
    self.eol, and optionally normalizes CRLF to LF when textmode is set.
    """
    def __init__(self, fp, textmode=False):
        self.fp = fp
        self.buf = []
        self.textmode = textmode
        self.eol = None

    def push(self, line):
        # queue a line to be returned by the next readline() call
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # pushed-back lines are returned verbatim, oldest first
        if self.buf:
            return self.buf.pop(0)
        line = self.fp.readline()
        if not self.eol:
            # remember the first line-ending style encountered
            if line.endswith('\r\n'):
                self.eol = '\r\n'
            elif line.endswith('\n'):
                self.eol = '\n'
        if self.textmode and line.endswith('\r\n'):
            line = line[:-2] + '\n'
        return line

    def __iter__(self):
        # iterate until readline() yields an empty string (EOF)
        line = self.readline()
        while line:
            yield line
            line = self.readline()
364 364
class abstractbackend(object):
    """Interface between the patch engine and the patched entity
    (filesystem, repository working directory, ...)."""
    def __init__(self, ui):
        self.ui = ui

    def readlines(self, fname):
        """Return target file lines, or its content as a single line
        for symlinks.
        """
        raise NotImplementedError

    def writelines(self, fname, lines, mode):
        """Write lines to target file. mode is a (islink, isexec)
        tuple, or None if there is no mode information.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def copy(self, src, dst):
        """Copy src file into dst file. Create intermediate directories if
        necessary. Files are specified relatively to the patching base
        directory.
        """
        raise NotImplementedError

    def exists(self, fname):
        """Return True if fname exists in the target."""
        raise NotImplementedError

    def setmode(self, fname, islink, isexec):
        """Change target file mode."""
        raise NotImplementedError
405
400 406 class fsbackend(abstractbackend):
401 407 def __init__(self, ui, basedir):
402 408 super(fsbackend, self).__init__(ui)
403 409 self.opener = scmutil.opener(basedir)
404 410
405 411 def _join(self, f):
406 412 return os.path.join(self.opener.base, f)
407 413
408 414 def readlines(self, fname):
409 415 if os.path.islink(self._join(fname)):
410 416 return [os.readlink(self._join(fname))]
411 417 fp = self.opener(fname, 'r')
412 418 try:
413 419 return list(fp)
414 420 finally:
415 421 fp.close()
416 422
417 def writelines(self, fname, lines):
418 # Ensure supplied data ends in fname, being a regular file or
419 # a symlink. _updatedir will -too magically- take care
420 # of setting it to the proper type afterwards.
421 st_mode = None
422 islink = os.path.islink(self._join(fname))
423 if islink:
424 fp = cStringIO.StringIO()
425 else:
423 def writelines(self, fname, lines, mode):
424 if not mode:
425 # Preserve mode information
426 isexec, islink = False, False
426 427 try:
427 st_mode = os.lstat(self._join(fname)).st_mode & 0777
428 isexec = os.lstat(self._join(fname)).st_mode & 0100 != 0
429 islink = os.path.islink(self._join(fname))
428 430 except OSError, e:
429 431 if e.errno != errno.ENOENT:
430 432 raise
431 fp = self.opener(fname, 'w')
432 try:
433 fp.writelines(lines)
434 if islink:
435 self.opener.symlink(fp.getvalue(), fname)
436 if st_mode is not None:
437 os.chmod(self._join(fname), st_mode)
438 finally:
439 fp.close()
433 else:
434 islink, isexec = mode
435 if islink:
436 self.opener.symlink(''.join(lines), fname)
437 else:
438 self.opener(fname, 'w').writelines(lines)
439 if isexec:
440 util.setflags(self._join(fname), False, True)
440 441
441 442 def unlink(self, fname):
442 443 os.unlink(self._join(fname))
443 444
444 445 def writerej(self, fname, failed, total, lines):
445 446 fname = fname + ".rej"
446 447 self.ui.warn(
447 448 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
448 449 (failed, total, fname))
449 450 fp = self.opener(fname, 'w')
450 451 fp.writelines(lines)
451 452 fp.close()
452 453
453 454 def copy(self, src, dst):
454 455 basedir = self.opener.base
455 456 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
456 457 for x in [src, dst]]
457 458 if os.path.lexists(absdst):
458 459 raise util.Abort(_("cannot create %s: destination already exists")
459 460 % dst)
460 461 dstdir = os.path.dirname(absdst)
461 462 if dstdir and not os.path.isdir(dstdir):
462 463 try:
463 464 os.makedirs(dstdir)
464 465 except IOError:
465 466 raise util.Abort(
466 467 _("cannot create %s: unable to create destination directory")
467 468 % dst)
468 469 util.copyfile(abssrc, absdst)
469 470
470 471 def exists(self, fname):
471 472 return os.path.lexists(self._join(fname))
472 473
474 def setmode(self, fname, islink, isexec):
475 util.setflags(self._join(fname), islink, isexec)
476
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
# context-diff range lines: "--- start,end ----" / "*** start,end ****"
contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
# supported end-of-line handling modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
477 481
class patchfile(object):
    """In-memory representation of one file being patched.

    Reads the target through 'backend', applies hunks to self.lines and
    writes the result (with 'mode', a (islink, isexec) tuple or None)
    back on close().
    """
    def __init__(self, ui, fname, backend, mode, missing=False,
                 eolmode='strict'):
        self.fname = fname
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = missing
        # desired (islink, isexec) for the patched result, or None
        self.mode = mode
        if not missing:
            try:
                self.lines = self.backend.readlines(fname)
                if self.lines:
                    # Normalize line endings
                    if self.lines[0].endswith('\r\n'):
                        self.eol = '\r\n'
                    elif self.lines[0].endswith('\n'):
                        self.eol = '\n'
                    if eolmode != 'strict':
                        nlines = []
                        for l in self.lines:
                            if l.endswith('\r\n'):
                                l = l[:-2] + '\n'
                            nlines.append(l)
                        self.lines = nlines
                    self.exists = True
            except IOError:
                pass
        else:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        # Restore the requested line-ending style before handing the
        # data to the backend.
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.writelines(fname, lines, mode)

    def printfile(self, warn):
        # announce the file name once, as a warning or a note
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk. Return 0 on clean apply, the fuzz level on a
        fuzzy apply, or -1 when the hunk was rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                            (h.number, h.desc, len(h.a), h.lena, len(h.b),
                            h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and h.createfile():
            self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content, no fuzzing
            if h.rmfile():
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if h.rmfile():
                self.backend.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # flush modified content (with the desired mode) and any rejects;
        # return the number of rejected hunks
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
676 682
class hunk(object):
    """One text hunk of a patch, parsed from unified or context format.

    self.a holds the old-side lines (prefixed '-' or ' '), self.b the
    new-side lines (unprefixed), and self.hunk the raw hunk lines with
    the '@@ ...' description first.
    """
    def __init__(self, desc, num, lr, context, create=False, remove=False):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None, False, False)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        nh.create = self.create
        nh.remove = self.remove
        return nh

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        # lengths default to 1 when omitted ("@@ -start +start @@")
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side ("*** ...") lines
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # "\ No newline at end of file": trim the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new-side ("--- ...") lines, merging them into self.hunk
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance through self.hunk to keep added lines in position
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # handle a trailing "\ No newline at end of file" marker
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def createfile(self):
        # hunk creates the file when the old side is empty and starts at 0
        return self.starta == 0 and self.lena == 0 and self.create

    def rmfile(self):
        # hunk removes the file when the new side is empty and starts at 0
        return self.startb == 0 and self.lenb == 0 and self.remove

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'.  It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        return self.fuzzit(self.b, fuzz, toponly)
907 913
class binhunk:
    'A binary patch file. Only understands literals so far.'
    def __init__(self, gitpatch):
        self.gitpatch = gitpatch
        self.text = None
        self.hunk = ['GIT binary patch\n']

    def createfile(self):
        return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')

    def rmfile(self):
        return self.gitpatch.op == 'DELETE'

    def complete(self):
        # extract() stores the decoded payload in self.text on success
        return self.text is not None

    def new(self):
        return [self.text]

    def extract(self, lr):
        """Read a git 'literal' binary hunk from line reader lr.

        Decodes the base85 body, inflates it and checks the result
        against the declared size. Raises PatchError on malformed or
        truncated data.
        """
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            # first char encodes the decoded length of the line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # bug fix: the format arguments must be a tuple inside the
            # '%' application; the original passed only len(text) to the
            # format string (TypeError) and 'size' as a stray second
            # exception argument
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
953 959
def parsefilename(str):
    """Extract the file name from a '--- name' / '+++ name' diff line.

    Strips the 4-character prefix and the trailing newline, then cuts at
    the first tab (preferred) or space, mimicking patch(1).
    """
    # NB: parameter name 'str' kept for interface compatibility
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
963 969
def pathstrip(path, strip):
    """Split 'path' after removing 'strip' leading directory levels.

    Returns (stripped_prefix, remainder); runs of consecutive slashes
    count as a single separator. Raises PatchError when the path has
    fewer than 'strip' components.
    """
    pathlen = len(path)
    if strip == 0:
        return '', path.rstrip()
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < pathlen - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
981 987
def selectfile(backend, afile_orig, bfile_orig, hunk, strip):
    """Pick which file a hunk should be applied to.

    Returns (fname, missing): fname is the stripped path to patch and
    missing is True when neither side exists and the hunk does not
    create the file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    createfunc = hunk.createfile
    missing = not goodb and not gooda and not createfunc()

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if missing and abasedir == bbasedir and afile.startswith(bfile):
        # this isn't very pretty
        hunk.create = True
        if createfunc():
            missing = False
        else:
            hunk.create = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on whichever side is not /dev/null
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, missing
1027 1033
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer it all so we can rewind
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp, lr.textmode)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the regular hunk parser re-reads the patch from the start
    fp.seek(pos)
    return gitpatches
1053 1059
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", (afile, bfile, firsthunk, mode)): select a new target
    file; mode is the git (islink, isexec) pair or None.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    changed = {}
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    git = False

    # our states
    BFILE = 1
    context = None  # None = unknown, True = context diff, False = unified
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if (state == BFILE and ((not context and x[0] == '@') or
            ((context is not False) and x.startswith('***************')))):
            if context is None and x.startswith('***************'):
                context = True
            gpatch = changed.get(bfile)
            create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
            remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
            h = hunk(x, hunknum + 1, lr, context, create, remove)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gpatch and gpatch.mode or None)
            yield 'hunk', h
        elif state == BFILE and x.startswith('GIT binary patch'):
            gpatch = changed[bfile]
            h = binhunk(gpatch)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', ('a/' + afile, 'b/' + bfile, h,
                               gpatch and gpatch.mode or None)
            h.extract(lr)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            # check for git diff, scanning the whole patch file if needed
            m = gitre.match(x)
            if m:
                afile, bfile = m.group(1, 2)
                if not git:
                    git = True
                    gitpatches = scangitpatch(lr, x)
                    yield 'git', gitpatches
                    for gp in gitpatches:
                        changed[gp.path] = gp
            # else error?
            # copy/rename + modify should modify target, not source
            gp = changed.get(bfile)
            if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
                       or gp.mode):
                afile = bfile
                newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # defer the 'file' event until the first hunk is seen
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0
1149 1157
def applydiff(ui, fp, changed, backend, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    The dict 'changed' is filled in with all of the filenames changed
    by the patch. Returns 0 for a clean patch, -1 if any rejects were
    found and 1 if there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.

    Callers probably want to call '_updatedir' after this to
    apply certain categories of changes not done by this function.
    """
    # 'patchfile' is the default per-file patcher class
    return _applydiff(ui, fp, patchfile, backend, changed, strip=strip,
                      eolmode=eolmode)
1166 1174
def _applydiff(ui, fp, patcher, backend, changed, strip=1, eolmode='strict'):
    """Drive iterhunks() events: open target files via 'patcher', apply
    hunks, perform git copies, and finally apply mode changes.

    Fills 'changed' with touched paths; returns -1 on rejects, 1 on
    fuzz, 0 on a clean application.
    """
    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret >= 0:
                changed.setdefault(current_file.fname, None)
                if ret > 0:
                    err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            afile, bfile, first_hunk, mode = values
            try:
                current_file, missing = selectfile(backend, afile, bfile,
                                                   first_hunk, strip)
                # mode is passed down so the patcher can set it when writing
                current_file = patcher(ui, current_file, backend, mode,
                                       missing=missing, eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                gp.path = pathstrip(gp.path, strip - 1)[1]
                if gp.oldpath:
                    gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                # Binary patches really overwrite target files, copying them
                # will just make it fails with "target file exists"
                if gp.op in ('COPY', 'RENAME') and not gp.binary:
                    backend.copy(gp.oldpath, gp.path)
                changed[gp.path] = gp
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    # Handle mode changes without hunk
    for gp in changed.itervalues():
        if not gp or not gp.mode:
            continue
        if gp.op == 'ADD' and not backend.exists(gp.path):
            # Added files without content have no hunk and must be created
            backend.writelines(gp.path, [], gp.mode)
        else:
            backend.setmode(gp.path, gp.mode[0], gp.mode[1])

    if rejects:
        return -1
    return err
1214 1232
def _updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        # dirstate wants paths relative to the cwd
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        scmutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        # with similarity > 0, addremove below may detect these as renames
        wctx.remove(sorted(removes), True)

    scmutil.addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
1257 1265
def _externalpatch(patcher, patchname, ui, strip, cwd, files):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))

    # scrape the external tool's output for patched files, fuzz and errors
    for line in fp:
        line = line.rstrip()
        ui.note(line + '\n')
        if line.startswith('patching file '):
            pf = util.parsepatchoutput(line)
            printed_file = False
            files.setdefault(pf, None)
        elif line.find('with fuzz') >= 0:
            fuzz = True
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
        elif line.find('saving rejects to file') >= 0:
            ui.warn(line + '\n')
        elif line.find('FAILED') >= 0:
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1294 1302
def internalpatch(ui, repo, patchobj, strip, cwd, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""

    if files is None:
        files = {}
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    backend = fsbackend(ui, cwd)
    try:
        # patchobj may be a path or an already-open file-like object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, files, backend, strip=strip, eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
    touched = _updatedir(ui, repo, files, similarity)
    files.update(dict.fromkeys(touched))
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1323 1331
def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = {}
    try:
        if patcher:
            try:
                return _externalpatch(patcher, patchname, ui, strip, cwd,
                                      files)
            finally:
                # update dirstate even when the external tool failed
                touched = _updatedir(ui, repo, files, similarity)
                files.update(dict.fromkeys(touched))
        return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
                             similarity)
    except PatchError, err:
        raise util.Abort(str(err))
1352 1360
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of working-directory paths the patch at
    'patchpath' would touch, without applying anything."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'hunk':
                continue
            elif state == 'file':
                afile, bfile, first_hunk, mode = values
                current_file, missing = selectfile(backend, afile, bfile,
                                                   first_hunk, strip)
                changed.add(current_file)
            elif state == 'git':
                for gp in values:
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    changed.add(gp.path)
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                        if gp.op == 'RENAME':
                            # a rename also removes its source
                            changed.add(gp.oldpath)
            else:
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1379 1387
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob id: sha1 over "blob <len>\0<content>"
        if not text:
            return hex(nullid)
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # prefix each encoded line with its decoded length:
        # 'A'-'Z' for 1-26 bytes, 'a'-'z' for 27-52 bytes
        l = len(line)
        if l <= 26:
            l = chr(ord('A') + l - 1)
        else:
            l = chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (l, base85.b85encode(line, True))

    def chunk(text, csize=52):
        l = len(text)
        i = 0
        while i < l:
            yield text[i:i + csize]
            i += csize

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        # identical content: nothing to emit
        return ""

    # TODO: deltas
    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
           (tohash, tnhash, len(tn))]
    for l in chunk(zlib.compress(tn)):
        ret.append(fmtline(l))
    ret.append('\n')
    return ''.join(ret)
1417 1425
class GitDiffRequired(Exception):
    """Raised when a change cannot be expressed without git diff format."""
    pass
1420 1428
def diffopts(ui, opts=None, untrusted=False):
    """Build an mdiff.diffopts from command options and [diff] config;
    explicit command options take precedence over configuration."""
    def get(key, name=None, getter=ui.configbool):
        return ((opts and opts.get(key)) or
                getter('diff', name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))
1434 1442
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs, keyed by file name
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
        modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1509 1517
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # order matters: '---'/'+++' must be tried before plain '-'/'+'
    prefixes = (('diff', 'diff.diffline'),
                ('copy', 'diff.extended'),
                ('rename', 'diff.extended'),
                ('old', 'diff.extended'),
                ('new', 'diff.extended'),
                ('deleted', 'diff.extended'),
                ('---', 'diff.file_a'),
                ('+++', 'diff.file_b'),
                ('@@', 'diff.hunk'),
                ('-', 'diff.deleted'),
                ('+', 'diff.inserted'))

    for piece in func(*args, **kw):
        for idx, line in enumerate(piece.split('\n')):
            if idx:
                # re-emit the newline that split() consumed
                yield ('\n', '')
            shown = line
            if line[:1] in ('+', '-'):
                # highlight trailing whitespace, but only in changed lines
                shown = line.rstrip()
            for prefix, label in prefixes:
                if shown.startswith(prefix):
                    yield (shown, label)
                    break
            else:
                yield (line, '')
            if shown != line:
                yield (line[len(shown):], 'diff.trailingwhitespace')
1541 1549
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)
1545 1553
1546 1554
1547 1555 def _addmodehdr(header, omode, nmode):
1548 1556 if omode != nmode:
1549 1557 header.append('old mode %s\n' % omode)
1550 1558 header.append('new mode %s\n' % nmode)
1551 1559
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    """Yield diff text per file between ctx1 and ctx2.

    'losedatafn' is called whenever a change cannot be represented in
    the current (non-git) format; it may raise to force git format.
    """

    def join(f):
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    gone = set()
    # manifest flag -> git mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse copy map: destination -> source lookups both ways
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        revs = None

    for f in sorted(modified + added + removed):
        to = None
        tn = None
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)
        if opts.git:
            header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                    # ctx2 date may be dynamic
                                    tn, util.datestr(ctx2.date()),
                                    join(a), join(b), revs, opts=opts)
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1655 1663
def diffstatdata(lines):
    """Yield (filename, adds, removes, isbinary) per file in a diff.

    A file with no '+'/'-' lines at all is reported as binary.
    """
    headerre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    fname = None
    adds = removes = 0
    for line in lines:
        if line.startswith('diff'):
            if fname:
                yield (fname, adds, removes, adds == 0 and removes == 0)
            # set numbers to 0 anyway when starting new file
            adds = removes = 0
            if line.startswith('diff --git'):
                fname = gitre.search(line).group(1)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = headerre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++'):
            adds += 1
        elif line.startswith('-') and not line.startswith('---'):
            removes += 1
    if fname:
        yield (fname, adds, removes, adds == 0 and removes == 0)
1679 1687
def diffstat(lines, width=80, git=False):
    """Render a classic diffstat table for 'lines', fitted to 'width'
    columns; with git=True binary files show 'Bin' instead of a count."""
    output = []
    stats = list(diffstatdata(lines))

    maxtotal, maxname = 0, 0
    totaladds, totalremoves = 0, 0
    hasbinary = False

    # precompute display widths (colwidth handles multi-byte names)
    sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
             for filename, adds, removes, isbinary in stats]

    for filename, adds, removes, isbinary, namewidth in sized:
        totaladds += adds
        totalremoves += removes
        maxname = max(maxname, namewidth)
        maxtotal = max(maxtotal, adds + removes)
        if isbinary:
            hasbinary = True

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the 'Bin' marker
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary, namewidth in sized:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - namewidth),
                       countwidth, count,
                       pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1731 1739
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # split the histogram off the name/count column
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now