##// END OF EJS Templates
patch: refactor file creation/removal detection...
Patrick Mezard -
r14451:c78d41db default
parent child Browse files
Show More
@@ -1,691 +1,691 b''
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a DSCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56 The more specific you are in your filename patterns the less you
57 57 lose speed in huge repositories.
58 58
59 59 For [keywordmaps] template mapping and expansion demonstration and
60 60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 61 available templates and filters.
62 62
63 63 Three additional date template filters are provided:
64 64
65 65 :``utcdate``: "2006/09/18 15:13:13"
66 66 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68 68
69 69 The default template mappings (view with :hg:`kwdemo -d`) can be
70 70 replaced with customized keywords and templates. Again, run
71 71 :hg:`kwdemo` to control the results of your configuration changes.
72 72
73 73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 74 to avoid storing expanded keywords in the change history.
75 75
76 76 To force expansion after enabling it, or a configuration change, run
77 77 :hg:`kwexpand`.
78 78
79 79 Expansions spanning more than one line and incremental expansions,
80 80 like CVS' $Log$, are not supported. A keyword template map "Log =
81 81 {desc}" expands to the first line of the changeset description.
82 82 '''
83 83
84 84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
85 85 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 86 from mercurial import scmutil
87 87 from mercurial.hgweb import webcommands
88 88 from mercurial.i18n import _
89 89 import os, re, shutil, tempfile
90 90
# 'kwdemo' builds its own throwaway repository, so it must be runnable
# outside of any existing repo.
commands.optionalrepo += ' kwdemo'

cmdtable = {}
command = cmdutil.command(cmdtable)

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'

# names of extensions using dorecord
recordextensions = 'record'

# color/effect labels for the kwfiles command output (see files() below)
colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}
114 114
# date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    # text is presumably a (timestamp, tzoffset) date tuple -- force a
    # zero offset so the date renders as UTC
    when = (text[0], 0)
    return util.datestr(when, '%Y/%m/%d %H:%M:%S')
# date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    # keep the original timezone offset (%1%2), unlike the utc variants
    fmt = '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)'
    return util.datestr(text, fmt)
# date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    # drop the timezone offset to render the timestamp in UTC
    utc = (text[0], 0)
    return util.datestr(utc, '%Y-%m-%d %H:%M:%SZ')
132 132
# register the three date filters so they can be used in keyword templates
templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible
# 'templater' is filled in by reposetup(), 'hgcmd' by uisetup()'s dispatch hook
kwtools = {'templater': None, 'hgcmd': ''}
139 139
140 140 def _defaultkwmaps(ui):
141 141 '''Returns default keywordmaps according to keywordset configuration.'''
142 142 templates = {
143 143 'Revision': '{node|short}',
144 144 'Author': '{author|user}',
145 145 }
146 146 kwsets = ({
147 147 'Date': '{date|utcdate}',
148 148 'RCSfile': '{file|basename},v',
149 149 'RCSFile': '{file|basename},v', # kept for backwards compatibility
150 150 # with hg-keyword
151 151 'Source': '{root}/{file},v',
152 152 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
153 153 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
154 154 }, {
155 155 'Date': '{date|svnisodate}',
156 156 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
157 157 'LastChangedRevision': '{node|short}',
158 158 'LastChangedBy': '{author|user}',
159 159 'LastChangedDate': '{date|svnisodate}',
160 160 })
161 161 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
162 162 return templates
163 163
164 164 def _shrinktext(text, subfunc):
165 165 '''Helper for keyword expansion removal in text.
166 166 Depending on subfunc also returns number of substitutions.'''
167 167 return subfunc(r'$\1$', text)
168 168
169 169 def _preselect(wstatus, changed):
170 170 '''Retrieves modfied and added files from a working directory state
171 171 and returns the subset of each contained in given changed files
172 172 retrieved from a change context.'''
173 173 modified, added = wstatus[:2]
174 174 modified = [f for f in modified if f in changed]
175 175 added = [f for f in added if f in changed]
176 176 return modified, added
177 177
178 178
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        # inc/exc: include/exclude filename patterns from [keyword] config
        self.ui = ui
        self.repo = repo
        self.match = match.match(repo.root, '', [], inc, exc)
        # restricted commands only expand on working-dir writes (see the
        # module-level `restricted` string)
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.record = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        # expansion text may not contain '$' or newlines, hence the
        # lazy [^$\n\r]*? between ': ' and ' $'
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            # only the first line: multi-line expansions are unsupported
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        # never touch binary files, and skip expansion entirely for
        # restricted commands
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion are not symbolic links.'''
        return [f for f in cand if self.match(f) and not 'l' in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.

        lookup: resolve each file's changectx via its manifest node.
        expand: True to expand keywords, False to shrink them.
        rekw: force matching unexpanded keywords (used after record).
        '''
        if self.restrict or lookup or self.record: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        lctx = ctx
        # restricted mode and rekw read shrunk data, so match bare keywords
        re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
        msg = (expand and _('overwriting %s expanding keywords\n')
               or _('overwriting %s shrinking keywords\n'))
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    lctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, lctx, re_kw.subn)
            elif self.restrict:
                # shrinking in restricted mode: data is already shrunk,
                # only check whether keywords are present at all
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                self.repo.wwrite(f, data, ctx.flags(f))
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.record:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        return self.restrict and self.shrink(fname, data) or data
297 297
class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt    # shared kwtemplater doing the (un)expansion
        self.path = path  # repo-relative path, used for pattern matching

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        # rename metadata is stored in the revision text; leave it alone
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        # stored data is shrunk, so shrink the incoming text as well
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, text)
324 324
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if kwt:
        listunknown = opts.get('unknown') or opts.get('all')
        return repo.status(match=scmutil.match(repo, pats, opts),
                           clean=True, unknown=listunknown)
    # no active templater: distinguish "nothing configured" from
    # "configured but no pattern can match in this repo"
    if ui.configitems('keyword'):
        raise util.Abort(_('[keyword] patterns cannot match'))
    raise util.Abort(_('no [keyword] patterns configured'))
334 334
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.

    expand: True for kwexpand, False for kwshrink.
    Aborts on an uncommitted merge or any local modifications, so only
    clean files are ever rewritten.
    '''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        status = _status(ui, repo, kwt, *pats, **opts)
        modified, added, removed, deleted, unknown, ignored, clean = status
        if modified or added or removed or deleted:
            raise util.Abort(_('outstanding uncommitted changes'))
        # overwrite only clean files; lookup=True resolves each file's
        # latest changectx for expansion
        kwt.overwrite(wctx, clean, True, expand)
    finally:
        wlock.release()
350 350
@command('kwdemo',
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '',
           _('read maps from rcfile'), _('FILE'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print one config section in hgrc syntax
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    # the demo runs against a throwaway repository so the user's real
    # repo is never touched
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(ui, tmpdir, True)
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)

    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # write one line per keyword so the commit below expands them all
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    # neutralize commit hooks in the demo repo so the commit cannot fail
    # or trigger user-configured side effects
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg)
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)
438 438
@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to True
    _kwfwrite(ui, repo, True, *pats, **opts)
449 449
@command('kwfiles',
         [('A', 'all', None, _('show keyword status flags of all files')),
          ('i', 'ignore', None, _('show files excluded from expansion')),
          ('u', 'unknown', None, _('only show unknown (not tracked) files')),
         ] + commands.walkopts,
         _('hg kwfiles [OPTION]... [FILE]...'))
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = kwtools['templater']
    status = _status(ui, repo, kwt, *pats, **opts)
    cwd = pats and repo.getcwd() or ''
    modified, added, removed, deleted, unknown, ignored, clean = status
    files = []
    if not opts.get('unknown') or opts.get('all'):
        files = sorted(modified + added + clean)
    wctx = repo[None]
    kwfiles = kwt.iskwfile(files, wctx)
    kwdeleted = kwt.iskwfile(deleted, wctx)
    kwunknown = kwt.iskwfile(unknown, wctx)
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwdeleted, kwunknown
    else:
        showfiles = [], [], []
    if opts.get('all') or opts.get('ignore'):
        # append the ignored categories; zip below pairs the 5-tuple
        # with the 5 status chars and 5 color labels
        showfiles += ([f for f in files if f not in kwfiles],
                      [f for f in unknown if f not in kwunknown])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    kwstates = zip('K!kIi', showfiles, kwlabels)
    for char, filenames, kwstate in kwstates:
        # status char only shown with -A/--all or -v/--verbose
        fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
        for f in filenames:
            ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
501 501
@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to False
    _kwfwrite(ui, repo, False, *pats, **opts)
512 512
513 513
def uisetup(ui):
    ''' Monkeypatches dispatch._parse to retrieve user command.'''

    def kwdispatch_parse(orig, ui, args):
        '''Monkeypatch dispatch._parse to obtain running hg command.'''
        parsed = orig(ui, args)
        # remember the command name so reposetup/kwtemplater can decide
        # whether keyword processing applies to it
        kwtools['hgcmd'] = parsed[0]
        return parsed

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
524 524
def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution.
    Overrides file method to return kwfilelog instead of filelog
    if file matches user configuration.
    Wraps commit to overwrite configured files with updated
    keyword substitutions.
    Monkeypatches patch and webcommands.'''

    try:
        # bail out where expansion makes no sense: non-local repos,
        # commands in nokwcommands, nested repos under .hg, bundle repos
        if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
            or '.hg' in util.splitpath(repo.root)
            or repo._url.startswith('bundle:')):
            return
    except AttributeError:
        pass

    # split [keyword] config into include/exclude patterns;
    # .hg* files are never expanded
    inc, exc = [], ['.hg*']
    for pat, opt in ui.configitems('keyword'):
        if opt != 'ignore':
            inc.append(pat)
        else:
            exc.append(pat)
    if not inc:
        return

    kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            # kwfilelog shrinks on write and expands on read
            return kwfilelog(self.sopener, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                return super(kwrepo, self).commit(*args, **opts)
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False):
            n = super(kwrepo, self).commitctx(ctx, error)
            # no lock needed, only called from repo.commit() which already locks
            if not kwt.record:
                restrict = kwt.restrict
                kwt.restrict = True
                # re-expand the just-committed files in the working dir
                kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
                              False, True)
                kwt.restrict = restrict
            return n

        def rollback(self, dryrun=False):
            wlock = self.wlock()
            try:
                if not dryrun:
                    # remember which files the rolled-back cset touched
                    changed = self['.'].files()
                ret = super(kwrepo, self).rollback(dryrun)
                if not dryrun:
                    ctx = self['.']
                    modified, added = _preselect(self[None].status(), changed)
                    # modified files: re-expand against the new parent;
                    # added files: shrink, they are no longer committed
                    kwt.overwrite(ctx, modified, True, True)
                    kwt.overwrite(ctx, added, True, False)
                return ret
            finally:
                wlock.release()

    # monkeypatches
    def kwpatchfile_init(orig, self, ui, fname, backend, mode, create, remove,
                         missing=False, eolmode=None):
        '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
        rejects or conflicts due to expanded keywords in working dir.'''
        orig(self, ui, fname, backend, mode, create, remove, missing, eolmode)
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)

    def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
                opts=None, prefix=''):
        '''Monkeypatch patch.diff to avoid expansion.'''
        kwt.restrict = True
        return orig(repo, node1, node2, match, changes, opts, prefix)

    def kwweb_skip(orig, web, req, tmpl):
        '''Wraps webcommands.x turning off keyword expansion.'''
        kwt.match = util.never
        return orig(web, req, tmpl)

    def kw_copy(orig, ui, repo, pats, opts, rename=False):
        '''Wraps cmdutil.copy so that copy/rename destinations do not
        contain expanded keywords.
        Note that the source of a regular file destination may also be a
        symlink:
        hg cp sym x                -> x is symlink
        cp sym x; hg cp -A sym x   -> x is file (maybe expanded keywords)
        For the latter we have to follow the symlink to find out whether its
        target is configured for expansion and we therefore must unexpand the
        keywords in the destination.'''
        orig(ui, repo, pats, opts, rename)
        if opts.get('dry_run'):
            return
        wctx = repo[None]
        cwd = repo.getcwd()

        def haskwsource(dest):
            '''Returns true if dest is a regular file and configured for
            expansion or a symlink which points to a file configured for
            expansion. '''
            source = repo.dirstate.copied(dest)
            if 'l' in wctx.flags(source):
                source = scmutil.canonpath(repo.root, cwd,
                                           os.path.realpath(source))
            return kwt.match(source)

        candidates = [f for f in repo.dirstate.copies() if
                      not 'l' in wctx.flags(f) and haskwsource(f)]
        # shrink keywords in the (possibly expanded) copy destinations
        kwt.overwrite(wctx, candidates, False, False)

    def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
        '''Wraps record.dorecord expanding keywords after recording.'''
        wlock = repo.wlock()
        try:
            # record returns 0 even when nothing has changed
            # therefore compare nodes before and after
            kwt.record = True
            ctx = repo['.']
            wstatus = repo[None].status()
            ret = orig(ui, repo, commitfunc, *pats, **opts)
            recctx = repo['.']
            if ctx != recctx:
                modified, added = _preselect(wstatus, recctx.files())
                kwt.restrict = False
                kwt.overwrite(recctx, modified, False, True)
                # rekw=True: added files were stored shrunk
                kwt.overwrite(recctx, added, False, True, True)
                kwt.restrict = True
            return ret
        finally:
            wlock.release()

    def kwfilectx_cmp(orig, self, fctx):
        # keyword affects data size, comparing wdir and filelog size does
        # not make sense
        if (fctx._filerev is None and
            (self._repo._encodefilterpats or
             kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
            self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())
        return True

    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    extensions.wrapfunction(patch, 'diff', kw_diff)
    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
    for c in 'annotate changeset rev filediff diff'.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)
    for name in recordextensions.split():
        try:
            record = extensions.find(name)
            extensions.wrapfunction(record, 'dorecord', kw_dorecord)
        except KeyError:
            pass

    repo.__class__ = kwrepo
@@ -1,1767 +1,1751 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, errno, re
10 10 import tempfile, zlib
11 11
12 12 from i18n import _
13 13 from node import hex, nullid, short
14 14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15 15
16 16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 17
18 18 class PatchError(Exception):
19 19 pass
20 20
21 21
22 22 # public functions
23 23
24 24 def split(stream):
25 25 '''return an iterator of individual patches from a stream'''
26 26 def isheader(line, inheader):
27 27 if inheader and line[0] in (' ', '\t'):
28 28 # continuation
29 29 return True
30 30 if line[0] in (' ', '-', '+'):
31 31 # diff line - don't check for header pattern in there
32 32 return False
33 33 l = line.split(': ', 1)
34 34 return len(l) == 2 and ' ' not in l[0]
35 35
36 36 def chunk(lines):
37 37 return cStringIO.StringIO(''.join(lines))
38 38
39 39 def hgsplit(stream, cur):
40 40 inheader = True
41 41
42 42 for line in stream:
43 43 if not line.strip():
44 44 inheader = False
45 45 if not inheader and line.startswith('# HG changeset patch'):
46 46 yield chunk(cur)
47 47 cur = []
48 48 inheader = True
49 49
50 50 cur.append(line)
51 51
52 52 if cur:
53 53 yield chunk(cur)
54 54
55 55 def mboxsplit(stream, cur):
56 56 for line in stream:
57 57 if line.startswith('From '):
58 58 for c in split(chunk(cur[1:])):
59 59 yield c
60 60 cur = []
61 61
62 62 cur.append(line)
63 63
64 64 if cur:
65 65 for c in split(chunk(cur[1:])):
66 66 yield c
67 67
68 68 def mimesplit(stream, cur):
69 69 def msgfp(m):
70 70 fp = cStringIO.StringIO()
71 71 g = email.Generator.Generator(fp, mangle_from_=False)
72 72 g.flatten(m)
73 73 fp.seek(0)
74 74 return fp
75 75
76 76 for line in stream:
77 77 cur.append(line)
78 78 c = chunk(cur)
79 79
80 80 m = email.Parser.Parser().parse(c)
81 81 if not m.is_multipart():
82 82 yield msgfp(m)
83 83 else:
84 84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 85 for part in m.walk():
86 86 ct = part.get_content_type()
87 87 if ct not in ok_types:
88 88 continue
89 89 yield msgfp(part)
90 90
91 91 def headersplit(stream, cur):
92 92 inheader = False
93 93
94 94 for line in stream:
95 95 if not inheader and isheader(line, inheader):
96 96 yield chunk(cur)
97 97 cur = []
98 98 inheader = True
99 99 if inheader and not isheader(line, inheader):
100 100 inheader = False
101 101
102 102 cur.append(line)
103 103
104 104 if cur:
105 105 yield chunk(cur)
106 106
107 107 def remainder(cur):
108 108 yield chunk(cur)
109 109
110 110 class fiter(object):
111 111 def __init__(self, fp):
112 112 self.fp = fp
113 113
114 114 def __iter__(self):
115 115 return self
116 116
117 117 def next(self):
118 118 l = self.fp.readline()
119 119 if not l:
120 120 raise StopIteration
121 121 return l
122 122
123 123 inheader = False
124 124 cur = []
125 125
126 126 mimeheaders = ['content-type']
127 127
128 128 if not hasattr(stream, 'next'):
129 129 # http responses, for example, have readline but not next
130 130 stream = fiter(stream)
131 131
132 132 for line in stream:
133 133 cur.append(line)
134 134 if line.startswith('# HG changeset patch'):
135 135 return hgsplit(stream, cur)
136 136 elif line.startswith('From '):
137 137 return mboxsplit(stream, cur)
138 138 elif isheader(line, inheader):
139 139 inheader = True
140 140 if line.split(':', 1)[0].lower() in mimeheaders:
141 141 # let email parser handle this
142 142 return mimesplit(stream, cur)
143 143 elif line.startswith('--- ') and inheader:
144 144 # No evil headers seen by diff start, split by hand
145 145 return headersplit(stream, cur)
146 146 # Not enough info, keep reading
147 147
148 148 # if we are here, we have a very plain patch
149 149 return remainder(cur)
150 150
151 151 def extract(ui, fileobj):
152 152 '''extract patch from data read from fileobj.
153 153
154 154 patch can be a normal patch or contained in an email message.
155 155
156 156 return tuple (filename, message, user, date, branch, node, p1, p2).
157 157 Any item in the returned tuple can be None. If filename is None,
158 158 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 159
160 160 # attempt to detect the start of a patch
161 161 # (this heuristic is borrowed from quilt)
162 162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 164 r'---[ \t].*?^\+\+\+[ \t]|'
165 165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 166
167 167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 168 tmpfp = os.fdopen(fd, 'w')
169 169 try:
170 170 msg = email.Parser.Parser().parse(fileobj)
171 171
172 172 subject = msg['Subject']
173 173 user = msg['From']
174 174 if not subject and not user:
175 175 # Not an email, restore parsed headers if any
176 176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 177
178 178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 179 # should try to parse msg['Date']
180 180 date = None
181 181 nodeid = None
182 182 branch = None
183 183 parents = []
184 184
185 185 if subject:
186 186 if subject.startswith('[PATCH'):
187 187 pend = subject.find(']')
188 188 if pend >= 0:
189 189 subject = subject[pend + 1:].lstrip()
190 190 subject = subject.replace('\n\t', ' ')
191 191 ui.debug('Subject: %s\n' % subject)
192 192 if user:
193 193 ui.debug('From: %s\n' % user)
194 194 diffs_seen = 0
195 195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 196 message = ''
197 197 for part in msg.walk():
198 198 content_type = part.get_content_type()
199 199 ui.debug('Content-Type: %s\n' % content_type)
200 200 if content_type not in ok_types:
201 201 continue
202 202 payload = part.get_payload(decode=True)
203 203 m = diffre.search(payload)
204 204 if m:
205 205 hgpatch = False
206 206 hgpatchheader = False
207 207 ignoretext = False
208 208
209 209 ui.debug('found patch at byte %d\n' % m.start(0))
210 210 diffs_seen += 1
211 211 cfp = cStringIO.StringIO()
212 212 for line in payload[:m.start(0)].splitlines():
213 213 if line.startswith('# HG changeset patch') and not hgpatch:
214 214 ui.debug('patch generated by hg export\n')
215 215 hgpatch = True
216 216 hgpatchheader = True
217 217 # drop earlier commit message content
218 218 cfp.seek(0)
219 219 cfp.truncate()
220 220 subject = None
221 221 elif hgpatchheader:
222 222 if line.startswith('# User '):
223 223 user = line[7:]
224 224 ui.debug('From: %s\n' % user)
225 225 elif line.startswith("# Date "):
226 226 date = line[7:]
227 227 elif line.startswith("# Branch "):
228 228 branch = line[9:]
229 229 elif line.startswith("# Node ID "):
230 230 nodeid = line[10:]
231 231 elif line.startswith("# Parent "):
232 232 parents.append(line[10:])
233 233 elif not line.startswith("# "):
234 234 hgpatchheader = False
235 235 elif line == '---' and gitsendmail:
236 236 ignoretext = True
237 237 if not hgpatchheader and not ignoretext:
238 238 cfp.write(line)
239 239 cfp.write('\n')
240 240 message = cfp.getvalue()
241 241 if tmpfp:
242 242 tmpfp.write(payload)
243 243 if not payload.endswith('\n'):
244 244 tmpfp.write('\n')
245 245 elif not diffs_seen and message and content_type == 'text/plain':
246 246 message += '\n' + payload
247 247 except:
248 248 tmpfp.close()
249 249 os.unlink(tmpname)
250 250 raise
251 251
252 252 if subject and not message.startswith(subject):
253 253 message = '%s\n%s' % (subject, message)
254 254 tmpfp.close()
255 255 if not diffs_seen:
256 256 os.unlink(tmpname)
257 257 return None, message, user, date, branch, None, None, None
258 258 p1 = parents and parents.pop(0) or None
259 259 p2 = parents and parents.pop(0) or None
260 260 return tmpname, message, user, date, branch, nodeid, p1, p2
261 261
262 262 class patchmeta(object):
263 263 """Patched file metadata
264 264
265 265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 266 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 267 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 268 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 269 'islink' is True if the file is a symlink and 'isexec' is True if
270 270 the file is executable. Otherwise, 'mode' is None.
271 271 """
272 272 def __init__(self, path):
273 273 self.path = path
274 274 self.oldpath = None
275 275 self.mode = None
276 276 self.op = 'MODIFY'
277 277 self.binary = False
278 278
279 279 def setmode(self, mode):
280 280 islink = mode & 020000
281 281 isexec = mode & 0100
282 282 self.mode = (islink, isexec)
283 283
284 284 def __repr__(self):
285 285 return "<patchmeta %s %r>" % (self.op, self.path)
286 286
287 287 def readgitpatch(lr):
288 288 """extract git-style metadata about patches from <patchname>"""
289 289
290 290 # Filter patch for git information
291 291 gp = None
292 292 gitpatches = []
293 293 for line in lr:
294 294 line = line.rstrip(' \r\n')
295 295 if line.startswith('diff --git'):
296 296 m = gitre.match(line)
297 297 if m:
298 298 if gp:
299 299 gitpatches.append(gp)
300 300 dst = m.group(2)
301 301 gp = patchmeta(dst)
302 302 elif gp:
303 303 if line.startswith('--- '):
304 304 gitpatches.append(gp)
305 305 gp = None
306 306 continue
307 307 if line.startswith('rename from '):
308 308 gp.op = 'RENAME'
309 309 gp.oldpath = line[12:]
310 310 elif line.startswith('rename to '):
311 311 gp.path = line[10:]
312 312 elif line.startswith('copy from '):
313 313 gp.op = 'COPY'
314 314 gp.oldpath = line[10:]
315 315 elif line.startswith('copy to '):
316 316 gp.path = line[8:]
317 317 elif line.startswith('deleted file'):
318 318 gp.op = 'DELETE'
319 319 elif line.startswith('new file mode '):
320 320 gp.op = 'ADD'
321 321 gp.setmode(int(line[-6:], 8))
322 322 elif line.startswith('new mode '):
323 323 gp.setmode(int(line[-6:], 8))
324 324 elif line.startswith('GIT binary patch'):
325 325 gp.binary = True
326 326 if gp:
327 327 gitpatches.append(gp)
328 328
329 329 return gitpatches
330 330
331 331 class linereader(object):
332 332 # simple class to allow pushing lines back into the input stream
333 333 def __init__(self, fp):
334 334 self.fp = fp
335 335 self.buf = []
336 336
337 337 def push(self, line):
338 338 if line is not None:
339 339 self.buf.append(line)
340 340
341 341 def readline(self):
342 342 if self.buf:
343 343 l = self.buf[0]
344 344 del self.buf[0]
345 345 return l
346 346 return self.fp.readline()
347 347
348 348 def __iter__(self):
349 349 while 1:
350 350 l = self.readline()
351 351 if not l:
352 352 break
353 353 yield l
354 354
355 355 class abstractbackend(object):
356 356 def __init__(self, ui):
357 357 self.ui = ui
358 358
359 359 def getfile(self, fname):
360 360 """Return target file data and flags as a (data, (islink,
361 361 isexec)) tuple.
362 362 """
363 363 raise NotImplementedError
364 364
365 365 def setfile(self, fname, data, mode):
366 366 """Write data to target file fname and set its mode. mode is a
367 367 (islink, isexec) tuple. If data is None, the file content should
368 368 be left unchanged.
369 369 """
370 370 raise NotImplementedError
371 371
372 372 def unlink(self, fname):
373 373 """Unlink target file."""
374 374 raise NotImplementedError
375 375
376 376 def writerej(self, fname, failed, total, lines):
377 377 """Write rejected lines for fname. total is the number of hunks
378 378 which failed to apply and total the total number of hunks for this
379 379 files.
380 380 """
381 381 pass
382 382
383 383 def copy(self, src, dst):
384 384 """Copy src file into dst file. Create intermediate directories if
385 385 necessary. Files are specified relatively to the patching base
386 386 directory.
387 387 """
388 388 raise NotImplementedError
389 389
390 390 def exists(self, fname):
391 391 raise NotImplementedError
392 392
393 393 class fsbackend(abstractbackend):
394 394 def __init__(self, ui, basedir):
395 395 super(fsbackend, self).__init__(ui)
396 396 self.opener = scmutil.opener(basedir)
397 397
398 398 def _join(self, f):
399 399 return os.path.join(self.opener.base, f)
400 400
401 401 def getfile(self, fname):
402 402 path = self._join(fname)
403 403 if os.path.islink(path):
404 404 return (os.readlink(path), (True, False))
405 405 isexec, islink = False, False
406 406 try:
407 407 isexec = os.lstat(path).st_mode & 0100 != 0
408 408 islink = os.path.islink(path)
409 409 except OSError, e:
410 410 if e.errno != errno.ENOENT:
411 411 raise
412 412 return (self.opener.read(fname), (islink, isexec))
413 413
414 414 def setfile(self, fname, data, mode):
415 415 islink, isexec = mode
416 416 if data is None:
417 417 util.setflags(self._join(fname), islink, isexec)
418 418 return
419 419 if islink:
420 420 self.opener.symlink(data, fname)
421 421 else:
422 422 self.opener.write(fname, data)
423 423 if isexec:
424 424 util.setflags(self._join(fname), False, True)
425 425
426 426 def unlink(self, fname):
427 427 try:
428 428 util.unlinkpath(self._join(fname))
429 429 except OSError, inst:
430 430 if inst.errno != errno.ENOENT:
431 431 raise
432 432
433 433 def writerej(self, fname, failed, total, lines):
434 434 fname = fname + ".rej"
435 435 self.ui.warn(
436 436 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
437 437 (failed, total, fname))
438 438 fp = self.opener(fname, 'w')
439 439 fp.writelines(lines)
440 440 fp.close()
441 441
442 442 def copy(self, src, dst):
443 443 basedir = self.opener.base
444 444 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
445 445 for x in [src, dst]]
446 446 if os.path.lexists(absdst):
447 447 raise util.Abort(_("cannot create %s: destination already exists")
448 448 % dst)
449 449 dstdir = os.path.dirname(absdst)
450 450 if dstdir and not os.path.isdir(dstdir):
451 451 try:
452 452 os.makedirs(dstdir)
453 453 except IOError:
454 454 raise util.Abort(
455 455 _("cannot create %s: unable to create destination directory")
456 456 % dst)
457 457 util.copyfile(abssrc, absdst)
458 458
459 459 def exists(self, fname):
460 460 return os.path.lexists(self._join(fname))
461 461
462 462 class workingbackend(fsbackend):
463 463 def __init__(self, ui, repo, similarity):
464 464 super(workingbackend, self).__init__(ui, repo.root)
465 465 self.repo = repo
466 466 self.similarity = similarity
467 467 self.removed = set()
468 468 self.changed = set()
469 469 self.copied = []
470 470
471 471 def setfile(self, fname, data, mode):
472 472 super(workingbackend, self).setfile(fname, data, mode)
473 473 self.changed.add(fname)
474 474
475 475 def unlink(self, fname):
476 476 super(workingbackend, self).unlink(fname)
477 477 self.removed.add(fname)
478 478 self.changed.add(fname)
479 479
480 480 def copy(self, src, dst):
481 481 super(workingbackend, self).copy(src, dst)
482 482 self.copied.append((src, dst))
483 483 self.changed.add(dst)
484 484
485 485 def close(self):
486 486 wctx = self.repo[None]
487 487 addremoved = set(self.changed)
488 488 for src, dst in self.copied:
489 489 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
490 490 addremoved.discard(src)
491 491 if (not self.similarity) and self.removed:
492 492 wctx.forget(sorted(self.removed))
493 493 if addremoved:
494 494 cwd = self.repo.getcwd()
495 495 if cwd:
496 496 addremoved = [util.pathto(self.repo.root, cwd, f)
497 497 for f in addremoved]
498 498 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
499 499 return sorted(self.changed)
500 500
501 501 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
502 502 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
503 503 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
504 504 eolmodes = ['strict', 'crlf', 'lf', 'auto']
505 505
506 506 class patchfile(object):
507 def __init__(self, ui, fname, backend, mode, missing=False,
507 def __init__(self, ui, fname, backend, mode, create, remove, missing=False,
508 508 eolmode='strict'):
509 509 self.fname = fname
510 510 self.eolmode = eolmode
511 511 self.eol = None
512 512 self.backend = backend
513 513 self.ui = ui
514 514 self.lines = []
515 515 self.exists = False
516 516 self.missing = missing
517 517 self.mode = mode
518 self.create = create
519 self.remove = remove
518 520 if not missing:
519 521 try:
520 522 data, mode = self.backend.getfile(fname)
521 523 if data:
522 524 self.lines = data.splitlines(True)
523 525 if self.mode is None:
524 526 self.mode = mode
525 527 if self.lines:
526 528 # Normalize line endings
527 529 if self.lines[0].endswith('\r\n'):
528 530 self.eol = '\r\n'
529 531 elif self.lines[0].endswith('\n'):
530 532 self.eol = '\n'
531 533 if eolmode != 'strict':
532 534 nlines = []
533 535 for l in self.lines:
534 536 if l.endswith('\r\n'):
535 537 l = l[:-2] + '\n'
536 538 nlines.append(l)
537 539 self.lines = nlines
538 540 self.exists = True
539 541 except IOError:
540 542 if self.mode is None:
541 543 self.mode = (False, False)
542 544 else:
543 545 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
544 546
545 547 self.hash = {}
546 548 self.dirty = 0
547 549 self.offset = 0
548 550 self.skew = 0
549 551 self.rej = []
550 552 self.fileprinted = False
551 553 self.printfile(False)
552 554 self.hunks = 0
553 555
554 556 def writelines(self, fname, lines, mode):
555 557 if self.eolmode == 'auto':
556 558 eol = self.eol
557 559 elif self.eolmode == 'crlf':
558 560 eol = '\r\n'
559 561 else:
560 562 eol = '\n'
561 563
562 564 if self.eolmode != 'strict' and eol and eol != '\n':
563 565 rawlines = []
564 566 for l in lines:
565 567 if l and l[-1] == '\n':
566 568 l = l[:-1] + eol
567 569 rawlines.append(l)
568 570 lines = rawlines
569 571
570 572 self.backend.setfile(fname, ''.join(lines), mode)
571 573
572 574 def printfile(self, warn):
573 575 if self.fileprinted:
574 576 return
575 577 if warn or self.ui.verbose:
576 578 self.fileprinted = True
577 579 s = _("patching file %s\n") % self.fname
578 580 if warn:
579 581 self.ui.warn(s)
580 582 else:
581 583 self.ui.note(s)
582 584
583 585
584 586 def findlines(self, l, linenum):
585 587 # looks through the hash and finds candidate lines. The
586 588 # result is a list of line numbers sorted based on distance
587 589 # from linenum
588 590
589 591 cand = self.hash.get(l, [])
590 592 if len(cand) > 1:
591 593 # resort our list of potentials forward then back.
592 594 cand.sort(key=lambda x: abs(x - linenum))
593 595 return cand
594 596
595 597 def write_rej(self):
596 598 # our rejects are a little different from patch(1). This always
597 599 # creates rejects in the same form as the original patch. A file
598 600 # header is inserted so that you can run the reject through patch again
599 601 # without having to type the filename.
600 602 if not self.rej:
601 603 return
602 604 base = os.path.basename(self.fname)
603 605 lines = ["--- %s\n+++ %s\n" % (base, base)]
604 606 for x in self.rej:
605 607 for l in x.hunk:
606 608 lines.append(l)
607 609 if l[-1] != '\n':
608 610 lines.append("\n\ No newline at end of file\n")
609 611 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
610 612
611 613 def apply(self, h):
612 614 if not h.complete():
613 615 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
614 616 (h.number, h.desc, len(h.a), h.lena, len(h.b),
615 617 h.lenb))
616 618
617 619 self.hunks += 1
618 620
619 621 if self.missing:
620 622 self.rej.append(h)
621 623 return -1
622 624
623 if self.exists and h.createfile():
625 if self.exists and self.create:
624 626 self.ui.warn(_("file %s already exists\n") % self.fname)
625 627 self.rej.append(h)
626 628 return -1
627 629
628 630 if isinstance(h, binhunk):
629 if h.rmfile():
631 if self.remove:
630 632 self.backend.unlink(self.fname)
631 633 else:
632 634 self.lines[:] = h.new()
633 635 self.offset += len(h.new())
634 636 self.dirty = True
635 637 return 0
636 638
637 639 horig = h
638 640 if (self.eolmode in ('crlf', 'lf')
639 641 or self.eolmode == 'auto' and self.eol):
640 642 # If new eols are going to be normalized, then normalize
641 643 # hunk data before patching. Otherwise, preserve input
642 644 # line-endings.
643 645 h = h.getnormalized()
644 646
645 647 # fast case first, no offsets, no fuzz
646 648 old = h.old()
647 649 # patch starts counting at 1 unless we are adding the file
648 650 if h.starta == 0:
649 651 start = 0
650 652 else:
651 653 start = h.starta + self.offset - 1
652 654 orig_start = start
653 655 # if there's skew we want to emit the "(offset %d lines)" even
654 656 # when the hunk cleanly applies at start + skew, so skip the
655 657 # fast case code
656 658 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
657 if h.rmfile():
659 if self.remove:
658 660 self.backend.unlink(self.fname)
659 661 else:
660 662 self.lines[start : start + h.lena] = h.new()
661 663 self.offset += h.lenb - h.lena
662 664 self.dirty = True
663 665 return 0
664 666
665 667 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
666 668 self.hash = {}
667 669 for x, s in enumerate(self.lines):
668 670 self.hash.setdefault(s, []).append(x)
669 671 if h.hunk[-1][0] != ' ':
670 672 # if the hunk tried to put something at the bottom of the file
671 673 # override the start line and use eof here
672 674 search_start = len(self.lines)
673 675 else:
674 676 search_start = orig_start + self.skew
675 677
676 678 for fuzzlen in xrange(3):
677 679 for toponly in [True, False]:
678 680 old = h.old(fuzzlen, toponly)
679 681
680 682 cand = self.findlines(old[0][1:], search_start)
681 683 for l in cand:
682 684 if diffhelpers.testhunk(old, self.lines, l) == 0:
683 685 newlines = h.new(fuzzlen, toponly)
684 686 self.lines[l : l + len(old)] = newlines
685 687 self.offset += len(newlines) - len(old)
686 688 self.skew = l - orig_start
687 689 self.dirty = True
688 690 offset = l - orig_start - fuzzlen
689 691 if fuzzlen:
690 692 msg = _("Hunk #%d succeeded at %d "
691 693 "with fuzz %d "
692 694 "(offset %d lines).\n")
693 695 self.printfile(True)
694 696 self.ui.warn(msg %
695 697 (h.number, l + 1, fuzzlen, offset))
696 698 else:
697 699 msg = _("Hunk #%d succeeded at %d "
698 700 "(offset %d lines).\n")
699 701 self.ui.note(msg % (h.number, l + 1, offset))
700 702 return fuzzlen
701 703 self.printfile(True)
702 704 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
703 705 self.rej.append(horig)
704 706 return -1
705 707
706 708 def close(self):
707 709 if self.dirty:
708 710 self.writelines(self.fname, self.lines, self.mode)
709 711 self.write_rej()
710 712 return len(self.rej)
711 713
712 714 class hunk(object):
713 def __init__(self, desc, num, lr, context, create=False, remove=False):
715 def __init__(self, desc, num, lr, context):
714 716 self.number = num
715 717 self.desc = desc
716 718 self.hunk = [desc]
717 719 self.a = []
718 720 self.b = []
719 721 self.starta = self.lena = None
720 722 self.startb = self.lenb = None
721 723 if lr is not None:
722 724 if context:
723 725 self.read_context_hunk(lr)
724 726 else:
725 727 self.read_unified_hunk(lr)
726 self.create = create
727 self.remove = remove and not create
728 728
729 729 def getnormalized(self):
730 730 """Return a copy with line endings normalized to LF."""
731 731
732 732 def normalize(lines):
733 733 nlines = []
734 734 for line in lines:
735 735 if line.endswith('\r\n'):
736 736 line = line[:-2] + '\n'
737 737 nlines.append(line)
738 738 return nlines
739 739
740 740 # Dummy object, it is rebuilt manually
741 nh = hunk(self.desc, self.number, None, None, False, False)
741 nh = hunk(self.desc, self.number, None, None)
742 742 nh.number = self.number
743 743 nh.desc = self.desc
744 744 nh.hunk = self.hunk
745 745 nh.a = normalize(self.a)
746 746 nh.b = normalize(self.b)
747 747 nh.starta = self.starta
748 748 nh.startb = self.startb
749 749 nh.lena = self.lena
750 750 nh.lenb = self.lenb
751 nh.create = self.create
752 nh.remove = self.remove
753 751 return nh
754 752
755 753 def read_unified_hunk(self, lr):
756 754 m = unidesc.match(self.desc)
757 755 if not m:
758 756 raise PatchError(_("bad hunk #%d") % self.number)
759 757 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
760 758 if self.lena is None:
761 759 self.lena = 1
762 760 else:
763 761 self.lena = int(self.lena)
764 762 if self.lenb is None:
765 763 self.lenb = 1
766 764 else:
767 765 self.lenb = int(self.lenb)
768 766 self.starta = int(self.starta)
769 767 self.startb = int(self.startb)
770 768 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
771 769 # if we hit eof before finishing out the hunk, the last line will
772 770 # be zero length. Lets try to fix it up.
773 771 while len(self.hunk[-1]) == 0:
774 772 del self.hunk[-1]
775 773 del self.a[-1]
776 774 del self.b[-1]
777 775 self.lena -= 1
778 776 self.lenb -= 1
779 777 self._fixnewline(lr)
780 778
781 779 def read_context_hunk(self, lr):
782 780 self.desc = lr.readline()
783 781 m = contextdesc.match(self.desc)
784 782 if not m:
785 783 raise PatchError(_("bad hunk #%d") % self.number)
786 784 foo, self.starta, foo2, aend, foo3 = m.groups()
787 785 self.starta = int(self.starta)
788 786 if aend is None:
789 787 aend = self.starta
790 788 self.lena = int(aend) - self.starta
791 789 if self.starta:
792 790 self.lena += 1
793 791 for x in xrange(self.lena):
794 792 l = lr.readline()
795 793 if l.startswith('---'):
796 794 # lines addition, old block is empty
797 795 lr.push(l)
798 796 break
799 797 s = l[2:]
800 798 if l.startswith('- ') or l.startswith('! '):
801 799 u = '-' + s
802 800 elif l.startswith(' '):
803 801 u = ' ' + s
804 802 else:
805 803 raise PatchError(_("bad hunk #%d old text line %d") %
806 804 (self.number, x))
807 805 self.a.append(u)
808 806 self.hunk.append(u)
809 807
810 808 l = lr.readline()
811 809 if l.startswith('\ '):
812 810 s = self.a[-1][:-1]
813 811 self.a[-1] = s
814 812 self.hunk[-1] = s
815 813 l = lr.readline()
816 814 m = contextdesc.match(l)
817 815 if not m:
818 816 raise PatchError(_("bad hunk #%d") % self.number)
819 817 foo, self.startb, foo2, bend, foo3 = m.groups()
820 818 self.startb = int(self.startb)
821 819 if bend is None:
822 820 bend = self.startb
823 821 self.lenb = int(bend) - self.startb
824 822 if self.startb:
825 823 self.lenb += 1
826 824 hunki = 1
827 825 for x in xrange(self.lenb):
828 826 l = lr.readline()
829 827 if l.startswith('\ '):
830 828 # XXX: the only way to hit this is with an invalid line range.
831 829 # The no-eol marker is not counted in the line range, but I
832 830 # guess there are diff(1) out there which behave differently.
833 831 s = self.b[-1][:-1]
834 832 self.b[-1] = s
835 833 self.hunk[hunki - 1] = s
836 834 continue
837 835 if not l:
838 836 # line deletions, new block is empty and we hit EOF
839 837 lr.push(l)
840 838 break
841 839 s = l[2:]
842 840 if l.startswith('+ ') or l.startswith('! '):
843 841 u = '+' + s
844 842 elif l.startswith(' '):
845 843 u = ' ' + s
846 844 elif len(self.b) == 0:
847 845 # line deletions, new block is empty
848 846 lr.push(l)
849 847 break
850 848 else:
851 849 raise PatchError(_("bad hunk #%d old text line %d") %
852 850 (self.number, x))
853 851 self.b.append(s)
854 852 while True:
855 853 if hunki >= len(self.hunk):
856 854 h = ""
857 855 else:
858 856 h = self.hunk[hunki]
859 857 hunki += 1
860 858 if h == u:
861 859 break
862 860 elif h.startswith('-'):
863 861 continue
864 862 else:
865 863 self.hunk.insert(hunki - 1, u)
866 864 break
867 865
868 866 if not self.a:
869 867 # this happens when lines were only added to the hunk
870 868 for x in self.hunk:
871 869 if x.startswith('-') or x.startswith(' '):
872 870 self.a.append(x)
873 871 if not self.b:
874 872 # this happens when lines were only deleted from the hunk
875 873 for x in self.hunk:
876 874 if x.startswith('+') or x.startswith(' '):
877 875 self.b.append(x[1:])
878 876 # @@ -start,len +start,len @@
879 877 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
880 878 self.startb, self.lenb)
881 879 self.hunk[0] = self.desc
882 880 self._fixnewline(lr)
883 881
884 882 def _fixnewline(self, lr):
885 883 l = lr.readline()
886 884 if l.startswith('\ '):
887 885 diffhelpers.fix_newline(self.hunk, self.a, self.b)
888 886 else:
889 887 lr.push(l)
890 888
891 889 def complete(self):
892 890 return len(self.a) == self.lena and len(self.b) == self.lenb
893 891
894 def createfile(self):
895 return self.starta == 0 and self.lena == 0 and self.create
896
897 def rmfile(self):
898 return self.startb == 0 and self.lenb == 0 and self.remove
899
900 892 def fuzzit(self, l, fuzz, toponly):
901 893 # this removes context lines from the top and bottom of list 'l'. It
902 894 # checks the hunk to make sure only context lines are removed, and then
903 895 # returns a new shortened list of lines.
904 896 fuzz = min(fuzz, len(l)-1)
905 897 if fuzz:
906 898 top = 0
907 899 bot = 0
908 900 hlen = len(self.hunk)
909 901 for x in xrange(hlen - 1):
910 902 # the hunk starts with the @@ line, so use x+1
911 903 if self.hunk[x + 1][0] == ' ':
912 904 top += 1
913 905 else:
914 906 break
915 907 if not toponly:
916 908 for x in xrange(hlen - 1):
917 909 if self.hunk[hlen - bot - 1][0] == ' ':
918 910 bot += 1
919 911 else:
920 912 break
921 913
922 914 # top and bot now count context in the hunk
923 915 # adjust them if either one is short
924 916 context = max(top, bot, 3)
925 917 if bot < context:
926 918 bot = max(0, fuzz - (context - bot))
927 919 else:
928 920 bot = min(fuzz, bot)
929 921 if top < context:
930 922 top = max(0, fuzz - (context - top))
931 923 else:
932 924 top = min(fuzz, top)
933 925
934 926 return l[top:len(l)-bot]
935 927 return l
936 928
937 929 def old(self, fuzz=0, toponly=False):
938 930 return self.fuzzit(self.a, fuzz, toponly)
939 931
940 932 def new(self, fuzz=0, toponly=False):
941 933 return self.fuzzit(self.b, fuzz, toponly)
942 934
943 935 class binhunk:
944 936 'A binary patch file. Only understands literals so far.'
945 def __init__(self, gitpatch, lr):
946 self.gitpatch = gitpatch
937 def __init__(self, lr):
947 938 self.text = None
948 939 self.hunk = ['GIT binary patch\n']
949 940 self._read(lr)
950 941
951 def createfile(self):
952 return self.gitpatch.op == 'ADD'
953
954 def rmfile(self):
955 return self.gitpatch.op == 'DELETE'
956
957 942 def complete(self):
958 943 return self.text is not None
959 944
960 945 def new(self):
961 946 return [self.text]
962 947
963 948 def _read(self, lr):
964 949 line = lr.readline()
965 950 self.hunk.append(line)
966 951 while line and not line.startswith('literal '):
967 952 line = lr.readline()
968 953 self.hunk.append(line)
969 954 if not line:
970 955 raise PatchError(_('could not extract binary patch'))
971 956 size = int(line[8:].rstrip())
972 957 dec = []
973 958 line = lr.readline()
974 959 self.hunk.append(line)
975 960 while len(line) > 1:
976 961 l = line[0]
977 962 if l <= 'Z' and l >= 'A':
978 963 l = ord(l) - ord('A') + 1
979 964 else:
980 965 l = ord(l) - ord('a') + 27
981 966 dec.append(base85.b85decode(line[1:-1])[:l])
982 967 line = lr.readline()
983 968 self.hunk.append(line)
984 969 text = zlib.decompress(''.join(dec))
985 970 if len(text) != size:
986 971 raise PatchError(_('binary patch is %d bytes, not %d') %
987 972 len(text), size)
988 973 self.text = text
989 974
990 975 def parsefilename(str):
991 976 # --- filename \t|space stuff
992 977 s = str[4:].rstrip('\r\n')
993 978 i = s.find('\t')
994 979 if i < 0:
995 980 i = s.find(' ')
996 981 if i < 0:
997 982 return s
998 983 return s[:i]
999 984
1000 985 def pathstrip(path, strip):
1001 986 pathlen = len(path)
1002 987 i = 0
1003 988 if strip == 0:
1004 989 return '', path.rstrip()
1005 990 count = strip
1006 991 while count > 0:
1007 992 i = path.find('/', i)
1008 993 if i == -1:
1009 994 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1010 995 (count, strip, path))
1011 996 i += 1
1012 997 # consume '//' in the path
1013 998 while i < pathlen - 1 and path[i] == '/':
1014 999 i += 1
1015 1000 count -= 1
1016 1001 return path[:i].lstrip(), path[i:].rstrip()
1017 1002
1018 1003 def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
1019 1004 if gp:
1020 1005 # Git patches do not play games. Excluding copies from the
1021 1006 # following heuristic avoids a lot of confusion
1022 1007 fname = pathstrip(gp.path, strip - 1)[1]
1023 missing = not hunk.createfile() and not backend.exists(fname)
1024 return fname, missing
1008 create = gp.op == 'ADD'
1009 remove = gp.op == 'DELETE'
1010 missing = not create and not backend.exists(fname)
1011 return fname, missing, create, remove
1025 1012 nulla = afile_orig == "/dev/null"
1026 1013 nullb = bfile_orig == "/dev/null"
1014 create = nulla and hunk.starta == 0 and hunk.lena == 0
1015 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1027 1016 abase, afile = pathstrip(afile_orig, strip)
1028 1017 gooda = not nulla and backend.exists(afile)
1029 1018 bbase, bfile = pathstrip(bfile_orig, strip)
1030 1019 if afile == bfile:
1031 1020 goodb = gooda
1032 1021 else:
1033 1022 goodb = not nullb and backend.exists(bfile)
1034 createfunc = hunk.createfile
1035 missing = not goodb and not gooda and not createfunc()
1023 missing = not goodb and not gooda and not create
1036 1024
1037 1025 # some diff programs apparently produce patches where the afile is
1038 1026 # not /dev/null, but afile starts with bfile
1039 1027 abasedir = afile[:afile.rfind('/') + 1]
1040 1028 bbasedir = bfile[:bfile.rfind('/') + 1]
1041 if missing and abasedir == bbasedir and afile.startswith(bfile):
1042 # this isn't very pretty
1043 hunk.create = True
1044 if createfunc():
1045 missing = False
1046 else:
1047 hunk.create = False
1029 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1030 and hunk.starta == 0 and hunk.lena == 0):
1031 create = True
1032 missing = False
1048 1033
1049 1034 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1050 1035 # diff is between a file and its backup. In this case, the original
1051 1036 # file should be patched (see original mpatch code).
1052 1037 isbackup = (abase == bbase and bfile.startswith(afile))
1053 1038 fname = None
1054 1039 if not missing:
1055 1040 if gooda and goodb:
1056 1041 fname = isbackup and afile or bfile
1057 1042 elif gooda:
1058 1043 fname = afile
1059 1044
1060 1045 if not fname:
1061 1046 if not nullb:
1062 1047 fname = isbackup and afile or bfile
1063 1048 elif not nulla:
1064 1049 fname = afile
1065 1050 else:
1066 1051 raise PatchError(_("undefined source and destination files"))
1067 1052
1068 return fname, missing
1053 return fname, missing, create, remove
1069 1054
1070 1055 def scangitpatch(lr, firstline):
1071 1056 """
1072 1057 Git patches can emit:
1073 1058 - rename a to b
1074 1059 - change b
1075 1060 - copy a to c
1076 1061 - change c
1077 1062
1078 1063 We cannot apply this sequence as-is, the renamed 'a' could not be
1079 1064 found for it would have been renamed already. And we cannot copy
1080 1065 from 'b' instead because 'b' would have been changed already. So
1081 1066 we scan the git patch for copy and rename commands so we can
1082 1067 perform the copies ahead of time.
1083 1068 """
1084 1069 pos = 0
1085 1070 try:
1086 1071 pos = lr.fp.tell()
1087 1072 fp = lr.fp
1088 1073 except IOError:
1089 1074 fp = cStringIO.StringIO(lr.fp.read())
1090 1075 gitlr = linereader(fp)
1091 1076 gitlr.push(firstline)
1092 1077 gitpatches = readgitpatch(gitlr)
1093 1078 fp.seek(pos)
1094 1079 return gitpatches
1095 1080
1096 1081 def iterhunks(fp):
1097 1082 """Read a patch and yield the following events:
1098 1083 - ("file", afile, bfile, firsthunk): select a new target file.
1099 1084 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1100 1085 "file" event.
1101 1086 - ("git", gitchanges): current diff is in git format, gitchanges
1102 1087 maps filenames to gitpatch records. Unique event.
1103 1088 """
1104 1089 afile = ""
1105 1090 bfile = ""
1106 1091 state = None
1107 1092 hunknum = 0
1108 1093 emitfile = newfile = False
1109 1094 gitpatches = None
1110 1095
1111 1096 # our states
1112 1097 BFILE = 1
1113 1098 context = None
1114 1099 lr = linereader(fp)
1115 1100
1116 1101 while True:
1117 1102 x = lr.readline()
1118 1103 if not x:
1119 1104 break
1120 1105 if state == BFILE and (
1121 1106 (not context and x[0] == '@')
1122 1107 or (context is not False and x.startswith('***************'))
1123 1108 or x.startswith('GIT binary patch')):
1124 1109 gp = None
1125 1110 if gitpatches and gitpatches[-1][0] == bfile:
1126 1111 gp = gitpatches.pop()[1]
1127 1112 if x.startswith('GIT binary patch'):
1128 h = binhunk(gp, lr)
1113 h = binhunk(lr)
1129 1114 else:
1130 1115 if context is None and x.startswith('***************'):
1131 1116 context = True
1132 create = afile == '/dev/null' or gp and gp.op == 'ADD'
1133 remove = bfile == '/dev/null' or gp and gp.op == 'DELETE'
1134 h = hunk(x, hunknum + 1, lr, context, create, remove)
1117 h = hunk(x, hunknum + 1, lr, context)
1135 1118 hunknum += 1
1136 1119 if emitfile:
1137 1120 emitfile = False
1138 1121 yield 'file', (afile, bfile, h, gp)
1139 1122 yield 'hunk', h
1140 1123 elif x.startswith('diff --git'):
1141 1124 m = gitre.match(x)
1142 1125 if not m:
1143 1126 continue
1144 1127 if gitpatches is None:
1145 1128 # scan whole input for git metadata
1146 1129 gitpatches = [('b/' + gp.path, gp) for gp
1147 1130 in scangitpatch(lr, x)]
1148 1131 yield 'git', [g[1] for g in gitpatches
1149 1132 if g[1].op in ('COPY', 'RENAME')]
1150 1133 gitpatches.reverse()
1151 1134 afile = 'a/' + m.group(1)
1152 1135 bfile = 'b/' + m.group(2)
1153 1136 while bfile != gitpatches[-1][0]:
1154 1137 gp = gitpatches.pop()[1]
1155 1138 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1156 1139 gp = gitpatches[-1][1]
1157 1140 # copy/rename + modify should modify target, not source
1158 1141 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1159 1142 afile = bfile
1160 1143 newfile = True
1161 1144 elif x.startswith('---'):
1162 1145 # check for a unified diff
1163 1146 l2 = lr.readline()
1164 1147 if not l2.startswith('+++'):
1165 1148 lr.push(l2)
1166 1149 continue
1167 1150 newfile = True
1168 1151 context = False
1169 1152 afile = parsefilename(x)
1170 1153 bfile = parsefilename(l2)
1171 1154 elif x.startswith('***'):
1172 1155 # check for a context diff
1173 1156 l2 = lr.readline()
1174 1157 if not l2.startswith('---'):
1175 1158 lr.push(l2)
1176 1159 continue
1177 1160 l3 = lr.readline()
1178 1161 lr.push(l3)
1179 1162 if not l3.startswith("***************"):
1180 1163 lr.push(l2)
1181 1164 continue
1182 1165 newfile = True
1183 1166 context = True
1184 1167 afile = parsefilename(x)
1185 1168 bfile = parsefilename(l2)
1186 1169
1187 1170 if newfile:
1188 1171 newfile = False
1189 1172 emitfile = True
1190 1173 state = BFILE
1191 1174 hunknum = 0
1192 1175
1193 1176 while gitpatches:
1194 1177 gp = gitpatches.pop()[1]
1195 1178 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1196 1179
1197 1180 def applydiff(ui, fp, changed, backend, strip=1, eolmode='strict'):
1198 1181 """Reads a patch from fp and tries to apply it.
1199 1182
1200 1183 The dict 'changed' is filled in with all of the filenames changed
1201 1184 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1202 1185 found and 1 if there was any fuzz.
1203 1186
1204 1187 If 'eolmode' is 'strict', the patch content and patched file are
1205 1188 read in binary mode. Otherwise, line endings are ignored when
1206 1189 patching then normalized according to 'eolmode'.
1207 1190 """
1208 1191 return _applydiff(ui, fp, patchfile, backend, changed, strip=strip,
1209 1192 eolmode=eolmode)
1210 1193
1211 1194 def _applydiff(ui, fp, patcher, backend, changed, strip=1, eolmode='strict'):
1212 1195
1213 1196 def pstrip(p):
1214 1197 return pathstrip(p, strip - 1)[1]
1215 1198
1216 1199 rejects = 0
1217 1200 err = 0
1218 1201 current_file = None
1219 1202
1220 1203 for state, values in iterhunks(fp):
1221 1204 if state == 'hunk':
1222 1205 if not current_file:
1223 1206 continue
1224 1207 ret = current_file.apply(values)
1225 1208 if ret >= 0:
1226 1209 changed.setdefault(current_file.fname, None)
1227 1210 if ret > 0:
1228 1211 err = 1
1229 1212 elif state == 'file':
1230 1213 if current_file:
1231 1214 rejects += current_file.close()
1232 1215 current_file = None
1233 1216 afile, bfile, first_hunk, gp = values
1234 1217 if gp:
1235 1218 path = pstrip(gp.path)
1236 1219 changed[path] = gp
1237 1220 if gp.op == 'DELETE':
1238 1221 backend.unlink(path)
1239 1222 continue
1240 1223 if gp.op == 'RENAME':
1241 1224 backend.unlink(pstrip(gp.oldpath))
1242 1225 if gp.mode and not first_hunk:
1243 1226 data = None
1244 1227 if gp.op == 'ADD':
1245 1228 # Added files without content have no hunk and
1246 1229 # must be created
1247 1230 data = ''
1248 1231 backend.setfile(path, data, gp.mode)
1249 1232 if not first_hunk:
1250 1233 continue
1251 1234 try:
1252 1235 mode = gp and gp.mode or None
1253 current_file, missing = selectfile(backend, afile, bfile,
1254 first_hunk, strip, gp)
1236 current_file, missing, create, remove = selectfile(
1237 backend, afile, bfile, first_hunk, strip, gp)
1255 1238 current_file = patcher(ui, current_file, backend, mode,
1256 missing=missing, eolmode=eolmode)
1239 create, remove, missing=missing,
1240 eolmode=eolmode)
1257 1241 except PatchError, inst:
1258 1242 ui.warn(str(inst) + '\n')
1259 1243 current_file = None
1260 1244 rejects += 1
1261 1245 continue
1262 1246 elif state == 'git':
1263 1247 for gp in values:
1264 1248 backend.copy(pstrip(gp.oldpath), pstrip(gp.path))
1265 1249 else:
1266 1250 raise util.Abort(_('unsupported parser state: %s') % state)
1267 1251
1268 1252 if current_file:
1269 1253 rejects += current_file.close()
1270 1254
1271 1255 if rejects:
1272 1256 return -1
1273 1257 return err
1274 1258
1275 1259 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1276 1260 similarity):
1277 1261 """use <patcher> to apply <patchname> to the working directory.
1278 1262 returns whether patch was applied with fuzz factor."""
1279 1263
1280 1264 fuzz = False
1281 1265 args = []
1282 1266 cwd = repo.root
1283 1267 if cwd:
1284 1268 args.append('-d %s' % util.shellquote(cwd))
1285 1269 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1286 1270 util.shellquote(patchname)))
1287 1271 try:
1288 1272 for line in fp:
1289 1273 line = line.rstrip()
1290 1274 ui.note(line + '\n')
1291 1275 if line.startswith('patching file '):
1292 1276 pf = util.parsepatchoutput(line)
1293 1277 printed_file = False
1294 1278 files.setdefault(pf, None)
1295 1279 elif line.find('with fuzz') >= 0:
1296 1280 fuzz = True
1297 1281 if not printed_file:
1298 1282 ui.warn(pf + '\n')
1299 1283 printed_file = True
1300 1284 ui.warn(line + '\n')
1301 1285 elif line.find('saving rejects to file') >= 0:
1302 1286 ui.warn(line + '\n')
1303 1287 elif line.find('FAILED') >= 0:
1304 1288 if not printed_file:
1305 1289 ui.warn(pf + '\n')
1306 1290 printed_file = True
1307 1291 ui.warn(line + '\n')
1308 1292 finally:
1309 1293 if files:
1310 1294 cfiles = list(files)
1311 1295 cwd = repo.getcwd()
1312 1296 if cwd:
1313 1297 cfiles = [util.pathto(repo.root, cwd, f)
1314 1298 for f in cfile]
1315 1299 scmutil.addremove(repo, cfiles, similarity=similarity)
1316 1300 code = fp.close()
1317 1301 if code:
1318 1302 raise PatchError(_("patch command failed: %s") %
1319 1303 util.explainexit(code)[0])
1320 1304 return fuzz
1321 1305
1322 1306 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1323 1307 similarity=0):
1324 1308 """use builtin patch to apply <patchobj> to the working directory.
1325 1309 returns whether patch was applied with fuzz factor."""
1326 1310
1327 1311 if files is None:
1328 1312 files = {}
1329 1313 if eolmode is None:
1330 1314 eolmode = ui.config('patch', 'eol', 'strict')
1331 1315 if eolmode.lower() not in eolmodes:
1332 1316 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1333 1317 eolmode = eolmode.lower()
1334 1318
1335 1319 backend = workingbackend(ui, repo, similarity)
1336 1320 try:
1337 1321 fp = open(patchobj, 'rb')
1338 1322 except TypeError:
1339 1323 fp = patchobj
1340 1324 try:
1341 1325 ret = applydiff(ui, fp, files, backend, strip=strip, eolmode=eolmode)
1342 1326 finally:
1343 1327 if fp != patchobj:
1344 1328 fp.close()
1345 1329 files.update(dict.fromkeys(backend.close()))
1346 1330 if ret < 0:
1347 1331 raise PatchError(_('patch failed to apply'))
1348 1332 return ret > 0
1349 1333
1350 1334 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1351 1335 similarity=0):
1352 1336 """Apply <patchname> to the working directory.
1353 1337
1354 1338 'eolmode' specifies how end of lines should be handled. It can be:
1355 1339 - 'strict': inputs are read in binary mode, EOLs are preserved
1356 1340 - 'crlf': EOLs are ignored when patching and reset to CRLF
1357 1341 - 'lf': EOLs are ignored when patching and reset to LF
1358 1342 - None: get it from user settings, default to 'strict'
1359 1343 'eolmode' is ignored when using an external patcher program.
1360 1344
1361 1345 Returns whether patch was applied with fuzz factor.
1362 1346 """
1363 1347 patcher = ui.config('ui', 'patch')
1364 1348 if files is None:
1365 1349 files = {}
1366 1350 try:
1367 1351 if patcher:
1368 1352 return _externalpatch(ui, repo, patcher, patchname, strip,
1369 1353 files, similarity)
1370 1354 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1371 1355 similarity)
1372 1356 except PatchError, err:
1373 1357 raise util.Abort(str(err))
1374 1358
1375 1359 def changedfiles(ui, repo, patchpath, strip=1):
1376 1360 backend = fsbackend(ui, repo.root)
1377 1361 fp = open(patchpath, 'rb')
1378 1362 try:
1379 1363 changed = set()
1380 1364 for state, values in iterhunks(fp):
1381 1365 if state == 'file':
1382 1366 afile, bfile, first_hunk, gp = values
1383 1367 if gp:
1384 1368 changed.add(pathstrip(gp.path, strip - 1)[1])
1385 1369 if gp.op == 'RENAME':
1386 1370 changed.add(pathstrip(gp.oldpath, strip - 1)[1])
1387 1371 if not first_hunk:
1388 1372 continue
1389 current_file, missing = selectfile(backend, afile, bfile,
1390 first_hunk, strip, gp)
1373 current_file, missing, create, remove = selectfile(
1374 backend, afile, bfile, first_hunk, strip, gp)
1391 1375 changed.add(current_file)
1392 1376 elif state not in ('hunk', 'git'):
1393 1377 raise util.Abort(_('unsupported parser state: %s') % state)
1394 1378 return changed
1395 1379 finally:
1396 1380 fp.close()
1397 1381
1398 1382 def b85diff(to, tn):
1399 1383 '''print base85-encoded binary diff'''
1400 1384 def gitindex(text):
1401 1385 if not text:
1402 1386 return hex(nullid)
1403 1387 l = len(text)
1404 1388 s = util.sha1('blob %d\0' % l)
1405 1389 s.update(text)
1406 1390 return s.hexdigest()
1407 1391
1408 1392 def fmtline(line):
1409 1393 l = len(line)
1410 1394 if l <= 26:
1411 1395 l = chr(ord('A') + l - 1)
1412 1396 else:
1413 1397 l = chr(l - 26 + ord('a') - 1)
1414 1398 return '%c%s\n' % (l, base85.b85encode(line, True))
1415 1399
1416 1400 def chunk(text, csize=52):
1417 1401 l = len(text)
1418 1402 i = 0
1419 1403 while i < l:
1420 1404 yield text[i:i + csize]
1421 1405 i += csize
1422 1406
1423 1407 tohash = gitindex(to)
1424 1408 tnhash = gitindex(tn)
1425 1409 if tohash == tnhash:
1426 1410 return ""
1427 1411
1428 1412 # TODO: deltas
1429 1413 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1430 1414 (tohash, tnhash, len(tn))]
1431 1415 for l in chunk(zlib.compress(tn)):
1432 1416 ret.append(fmtline(l))
1433 1417 ret.append('\n')
1434 1418 return ''.join(ret)
1435 1419
1436 1420 class GitDiffRequired(Exception):
1437 1421 pass
1438 1422
1439 1423 def diffopts(ui, opts=None, untrusted=False):
1440 1424 def get(key, name=None, getter=ui.configbool):
1441 1425 return ((opts and opts.get(key)) or
1442 1426 getter('diff', name or key, None, untrusted=untrusted))
1443 1427 return mdiff.diffopts(
1444 1428 text=opts and opts.get('text'),
1445 1429 git=get('git'),
1446 1430 nodates=get('nodates'),
1447 1431 showfunc=get('show_function', 'showfunc'),
1448 1432 ignorews=get('ignore_all_space', 'ignorews'),
1449 1433 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1450 1434 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1451 1435 context=get('unified', getter=ui.config))
1452 1436
1453 1437 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1454 1438 losedatafn=None, prefix=''):
1455 1439 '''yields diff of changes to files between two nodes, or node and
1456 1440 working directory.
1457 1441
1458 1442 if node1 is None, use first dirstate parent instead.
1459 1443 if node2 is None, compare node1 with working directory.
1460 1444
1461 1445 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1462 1446 every time some change cannot be represented with the current
1463 1447 patch format. Return False to upgrade to git patch format, True to
1464 1448 accept the loss or raise an exception to abort the diff. It is
1465 1449 called with the name of current file being diffed as 'fn'. If set
1466 1450 to None, patches will always be upgraded to git format when
1467 1451 necessary.
1468 1452
1469 1453 prefix is a filename prefix that is prepended to all filenames on
1470 1454 display (used for subrepos).
1471 1455 '''
1472 1456
1473 1457 if opts is None:
1474 1458 opts = mdiff.defaultopts
1475 1459
1476 1460 if not node1 and not node2:
1477 1461 node1 = repo.dirstate.p1()
1478 1462
1479 1463 def lrugetfilectx():
1480 1464 cache = {}
1481 1465 order = []
1482 1466 def getfilectx(f, ctx):
1483 1467 fctx = ctx.filectx(f, filelog=cache.get(f))
1484 1468 if f not in cache:
1485 1469 if len(cache) > 20:
1486 1470 del cache[order.pop(0)]
1487 1471 cache[f] = fctx.filelog()
1488 1472 else:
1489 1473 order.remove(f)
1490 1474 order.append(f)
1491 1475 return fctx
1492 1476 return getfilectx
1493 1477 getfilectx = lrugetfilectx()
1494 1478
1495 1479 ctx1 = repo[node1]
1496 1480 ctx2 = repo[node2]
1497 1481
1498 1482 if not changes:
1499 1483 changes = repo.status(ctx1, ctx2, match=match)
1500 1484 modified, added, removed = changes[:3]
1501 1485
1502 1486 if not modified and not added and not removed:
1503 1487 return []
1504 1488
1505 1489 revs = None
1506 1490 if not repo.ui.quiet:
1507 1491 hexfunc = repo.ui.debugflag and hex or short
1508 1492 revs = [hexfunc(node) for node in [node1, node2] if node]
1509 1493
1510 1494 copy = {}
1511 1495 if opts.git or opts.upgrade:
1512 1496 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1513 1497
1514 1498 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1515 1499 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1516 1500 if opts.upgrade and not opts.git:
1517 1501 try:
1518 1502 def losedata(fn):
1519 1503 if not losedatafn or not losedatafn(fn=fn):
1520 1504 raise GitDiffRequired()
1521 1505 # Buffer the whole output until we are sure it can be generated
1522 1506 return list(difffn(opts.copy(git=False), losedata))
1523 1507 except GitDiffRequired:
1524 1508 return difffn(opts.copy(git=True), None)
1525 1509 else:
1526 1510 return difffn(opts, None)
1527 1511
1528 1512 def difflabel(func, *args, **kw):
1529 1513 '''yields 2-tuples of (output, label) based on the output of func()'''
1530 1514 prefixes = [('diff', 'diff.diffline'),
1531 1515 ('copy', 'diff.extended'),
1532 1516 ('rename', 'diff.extended'),
1533 1517 ('old', 'diff.extended'),
1534 1518 ('new', 'diff.extended'),
1535 1519 ('deleted', 'diff.extended'),
1536 1520 ('---', 'diff.file_a'),
1537 1521 ('+++', 'diff.file_b'),
1538 1522 ('@@', 'diff.hunk'),
1539 1523 ('-', 'diff.deleted'),
1540 1524 ('+', 'diff.inserted')]
1541 1525
1542 1526 for chunk in func(*args, **kw):
1543 1527 lines = chunk.split('\n')
1544 1528 for i, line in enumerate(lines):
1545 1529 if i != 0:
1546 1530 yield ('\n', '')
1547 1531 stripline = line
1548 1532 if line and line[0] in '+-':
1549 1533 # highlight trailing whitespace, but only in changed lines
1550 1534 stripline = line.rstrip()
1551 1535 for prefix, label in prefixes:
1552 1536 if stripline.startswith(prefix):
1553 1537 yield (stripline, label)
1554 1538 break
1555 1539 else:
1556 1540 yield (line, '')
1557 1541 if line != stripline:
1558 1542 yield (line[len(stripline):], 'diff.trailingwhitespace')
1559 1543
1560 1544 def diffui(*args, **kw):
1561 1545 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1562 1546 return difflabel(diff, *args, **kw)
1563 1547
1564 1548
1565 1549 def _addmodehdr(header, omode, nmode):
1566 1550 if omode != nmode:
1567 1551 header.append('old mode %s\n' % omode)
1568 1552 header.append('new mode %s\n' % nmode)
1569 1553
1570 1554 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1571 1555 copy, getfilectx, opts, losedatafn, prefix):
1572 1556
1573 1557 def join(f):
1574 1558 return os.path.join(prefix, f)
1575 1559
1576 1560 date1 = util.datestr(ctx1.date())
1577 1561 man1 = ctx1.manifest()
1578 1562
1579 1563 gone = set()
1580 1564 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1581 1565
1582 1566 copyto = dict([(v, k) for k, v in copy.items()])
1583 1567
1584 1568 if opts.git:
1585 1569 revs = None
1586 1570
1587 1571 for f in sorted(modified + added + removed):
1588 1572 to = None
1589 1573 tn = None
1590 1574 dodiff = True
1591 1575 header = []
1592 1576 if f in man1:
1593 1577 to = getfilectx(f, ctx1).data()
1594 1578 if f not in removed:
1595 1579 tn = getfilectx(f, ctx2).data()
1596 1580 a, b = f, f
1597 1581 if opts.git or losedatafn:
1598 1582 if f in added:
1599 1583 mode = gitmode[ctx2.flags(f)]
1600 1584 if f in copy or f in copyto:
1601 1585 if opts.git:
1602 1586 if f in copy:
1603 1587 a = copy[f]
1604 1588 else:
1605 1589 a = copyto[f]
1606 1590 omode = gitmode[man1.flags(a)]
1607 1591 _addmodehdr(header, omode, mode)
1608 1592 if a in removed and a not in gone:
1609 1593 op = 'rename'
1610 1594 gone.add(a)
1611 1595 else:
1612 1596 op = 'copy'
1613 1597 header.append('%s from %s\n' % (op, join(a)))
1614 1598 header.append('%s to %s\n' % (op, join(f)))
1615 1599 to = getfilectx(a, ctx1).data()
1616 1600 else:
1617 1601 losedatafn(f)
1618 1602 else:
1619 1603 if opts.git:
1620 1604 header.append('new file mode %s\n' % mode)
1621 1605 elif ctx2.flags(f):
1622 1606 losedatafn(f)
1623 1607 # In theory, if tn was copied or renamed we should check
1624 1608 # if the source is binary too but the copy record already
1625 1609 # forces git mode.
1626 1610 if util.binary(tn):
1627 1611 if opts.git:
1628 1612 dodiff = 'binary'
1629 1613 else:
1630 1614 losedatafn(f)
1631 1615 if not opts.git and not tn:
1632 1616 # regular diffs cannot represent new empty file
1633 1617 losedatafn(f)
1634 1618 elif f in removed:
1635 1619 if opts.git:
1636 1620 # have we already reported a copy above?
1637 1621 if ((f in copy and copy[f] in added
1638 1622 and copyto[copy[f]] == f) or
1639 1623 (f in copyto and copyto[f] in added
1640 1624 and copy[copyto[f]] == f)):
1641 1625 dodiff = False
1642 1626 else:
1643 1627 header.append('deleted file mode %s\n' %
1644 1628 gitmode[man1.flags(f)])
1645 1629 elif not to or util.binary(to):
1646 1630 # regular diffs cannot represent empty file deletion
1647 1631 losedatafn(f)
1648 1632 else:
1649 1633 oflag = man1.flags(f)
1650 1634 nflag = ctx2.flags(f)
1651 1635 binary = util.binary(to) or util.binary(tn)
1652 1636 if opts.git:
1653 1637 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1654 1638 if binary:
1655 1639 dodiff = 'binary'
1656 1640 elif binary or nflag != oflag:
1657 1641 losedatafn(f)
1658 1642 if opts.git:
1659 1643 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1660 1644
1661 1645 if dodiff:
1662 1646 if dodiff == 'binary':
1663 1647 text = b85diff(to, tn)
1664 1648 else:
1665 1649 text = mdiff.unidiff(to, date1,
1666 1650 # ctx2 date may be dynamic
1667 1651 tn, util.datestr(ctx2.date()),
1668 1652 join(a), join(b), revs, opts=opts)
1669 1653 if header and (text or len(header) > 1):
1670 1654 yield ''.join(header)
1671 1655 if text:
1672 1656 yield text
1673 1657
1674 1658 def diffstatsum(stats):
1675 1659 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1676 1660 for f, a, r, b in stats:
1677 1661 maxfile = max(maxfile, encoding.colwidth(f))
1678 1662 maxtotal = max(maxtotal, a + r)
1679 1663 addtotal += a
1680 1664 removetotal += r
1681 1665 binary = binary or b
1682 1666
1683 1667 return maxfile, maxtotal, addtotal, removetotal, binary
1684 1668
1685 1669 def diffstatdata(lines):
1686 1670 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1687 1671
1688 1672 results = []
1689 1673 filename, adds, removes = None, 0, 0
1690 1674
1691 1675 def addresult():
1692 1676 if filename:
1693 1677 isbinary = adds == 0 and removes == 0
1694 1678 results.append((filename, adds, removes, isbinary))
1695 1679
1696 1680 for line in lines:
1697 1681 if line.startswith('diff'):
1698 1682 addresult()
1699 1683 # set numbers to 0 anyway when starting new file
1700 1684 adds, removes = 0, 0
1701 1685 if line.startswith('diff --git'):
1702 1686 filename = gitre.search(line).group(1)
1703 1687 elif line.startswith('diff -r'):
1704 1688 # format: "diff -r ... -r ... filename"
1705 1689 filename = diffre.search(line).group(1)
1706 1690 elif line.startswith('+') and not line.startswith('+++'):
1707 1691 adds += 1
1708 1692 elif line.startswith('-') and not line.startswith('---'):
1709 1693 removes += 1
1710 1694 addresult()
1711 1695 return results
1712 1696
1713 1697 def diffstat(lines, width=80, git=False):
1714 1698 output = []
1715 1699 stats = diffstatdata(lines)
1716 1700 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1717 1701
1718 1702 countwidth = len(str(maxtotal))
1719 1703 if hasbinary and countwidth < 3:
1720 1704 countwidth = 3
1721 1705 graphwidth = width - countwidth - maxname - 6
1722 1706 if graphwidth < 10:
1723 1707 graphwidth = 10
1724 1708
1725 1709 def scale(i):
1726 1710 if maxtotal <= graphwidth:
1727 1711 return i
1728 1712 # If diffstat runs out of room it doesn't print anything,
1729 1713 # which isn't very useful, so always print at least one + or -
1730 1714 # if there were at least some changes.
1731 1715 return max(i * graphwidth // maxtotal, int(bool(i)))
1732 1716
1733 1717 for filename, adds, removes, isbinary in stats:
1734 1718 if git and isbinary:
1735 1719 count = 'Bin'
1736 1720 else:
1737 1721 count = adds + removes
1738 1722 pluses = '+' * scale(adds)
1739 1723 minuses = '-' * scale(removes)
1740 1724 output.append(' %s%s | %*s %s%s\n' %
1741 1725 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1742 1726 countwidth, count, pluses, minuses))
1743 1727
1744 1728 if stats:
1745 1729 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1746 1730 % (len(stats), totaladds, totalremoves))
1747 1731
1748 1732 return ''.join(output)
1749 1733
1750 1734 def diffstatui(*args, **kw):
1751 1735 '''like diffstat(), but yields 2-tuples of (output, label) for
1752 1736 ui.write()
1753 1737 '''
1754 1738
1755 1739 for line in diffstat(*args, **kw).splitlines():
1756 1740 if line and line[-1] in '+-':
1757 1741 name, graph = line.rsplit(' ', 1)
1758 1742 yield (name + ' ', '')
1759 1743 m = re.search(r'\++', graph)
1760 1744 if m:
1761 1745 yield (m.group(0), 'diffstat.inserted')
1762 1746 m = re.search(r'-+', graph)
1763 1747 if m:
1764 1748 yield (m.group(0), 'diffstat.deleted')
1765 1749 else:
1766 1750 yield (line, '')
1767 1751 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now