##// END OF EJS Templates
patch: generalize the use of patchmeta in applydiff()...
Patrick Mezard -
r14566:d0c2cc11 default
parent child Browse files
Show More
@@ -1,692 +1,690 b''
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a DSCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56 The more specific you are in your filename patterns the less you
57 57 lose speed in huge repositories.
58 58
59 59 For [keywordmaps] template mapping and expansion demonstration and
60 60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 61 available templates and filters.
62 62
63 63 Three additional date template filters are provided:
64 64
65 65 :``utcdate``: "2006/09/18 15:13:13"
66 66 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68 68
69 69 The default template mappings (view with :hg:`kwdemo -d`) can be
70 70 replaced with customized keywords and templates. Again, run
71 71 :hg:`kwdemo` to control the results of your configuration changes.
72 72
73 73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 74 to avoid storing expanded keywords in the change history.
75 75
76 76 To force expansion after enabling it, or a configuration change, run
77 77 :hg:`kwexpand`.
78 78
79 79 Expansions spanning more than one line and incremental expansions,
80 80 like CVS' $Log$, are not supported. A keyword template map "Log =
81 81 {desc}" expands to the first line of the changeset description.
82 82 '''
83 83
84 84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
85 85 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 86 from mercurial import scmutil
87 87 from mercurial.hgweb import webcommands
88 88 from mercurial.i18n import _
89 89 import os, re, shutil, tempfile
90 90
# Let 'hg kwdemo' run outside an existing repository.
commands.optionalrepo += ' kwdemo'

cmdtable = {}
command = cmdutil.command(cmdtable)

# hg commands that never act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# hg commands that trigger expansion only when writing to the working
# directory, not when reading the filelog, and that unexpand when
# reading from the working directory
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'

# names of extensions using dorecord that get wrapped in reposetup()
recordextensions = 'record'

# labels used by the kwfiles command output
colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}
114 114
# date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    timestamp = text[0]
    # force a zero timezone offset so the date renders as UTC
    return util.datestr((timestamp, 0), '%Y/%m/%d %H:%M:%S')
# date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    # %1%2 renders the stored timezone offset as +HHMM
    fmt = '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)'
    return util.datestr(text, fmt)
# date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    timestamp = text[0]
    # force a zero timezone offset so the date renders as UTC
    return util.datestr((timestamp, 0), '%Y-%m-%d %H:%M:%SZ')
132 132
# register the three date filters with the template engine
templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible to uisetup/reposetup and the commands
kwtools = {'templater': None, 'hgcmd': ''}
139 139
def _defaultkwmaps(ui):
    '''Returns default keywordmaps according to keywordset configuration.'''
    # mappings common to both the cvs- and svn-like keyword sets
    templates = {
        'Revision': '{node|short}',
        'Author': '{author|user}',
    }
    cvskwset = {
        'Date': '{date|utcdate}',
        'RCSfile': '{file|basename},v',
        'RCSFile': '{file|basename},v', # kept for backwards compatibility
                                        # with hg-keyword
        'Source': '{root}/{file},v',
        'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
        'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
    }
    svnkwset = {
        'Date': '{date|svnisodate}',
        'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
        'LastChangedRevision': '{node|short}',
        'LastChangedBy': '{author|user}',
        'LastChangedDate': '{date|svnisodate}',
    }
    # [keywordset] svn selects the svn-like maps, cvs-like is the default
    if ui.configbool('keywordset', 'svn'):
        templates.update(svnkwset)
    else:
        templates.update(cvskwset)
    return templates
163 163
def _shrinktext(text, subfunc):
    '''Helper for keyword expansion removal in text.
    Depending on subfunc also returns number of substitutions.'''
    # collapse '$Keyword: expansion $' back to '$Keyword$'; when subfunc
    # is a pattern's subn, the result is a (text, count) pair instead
    return subfunc(r'$\1$', text)
168 168
def _preselect(wstatus, changed):
    '''Retrieves modified and added files from a working directory state
    and returns the subset of each contained in given changed files
    retrieved from a change context.'''
    modified, added = wstatus[:2]
    changedset = set(changed)
    return ([f for f in modified if f in changedset],
            [f for f in added if f in changedset])
177 177
178 178
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        self.match = match.match(repo.root, '', [], inc, exc)
        # restricted commands only expand when writing to the working dir
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.record = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            # capture the rendered template instead of writing it out
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        # never expand for restricted commands, unmatched or binary files
        if self.restrict or not self.match(path) or util.binary(data):
            return data
        ctx = self.linkctx(path, node)
        return self.substitute(data, path, ctx, self.rekw.sub)

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion and are not symbolic links.'''
        return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.record: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        lctx = ctx
        re_kw = self.rekw if self.restrict or rekw else self.rekwexp
        msg = (_('overwriting %s expanding keywords\n') if expand
               else _('overwriting %s shrinking keywords\n'))
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    lctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, lctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                self.repo.wwrite(f, data, ctx.flags(f))
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.record:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        # deliberate and/or idiom: falls back to data if shrink is falsy
        return self.restrict and self.shrink(fname, data) or data
297 297
class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        # renamed revisions carry copy metadata; leave them untouched
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        shrunk = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(shrunk, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        shrunk = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, shrunk)
324 324
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if kwt:
        showunknown = opts.get('unknown') or opts.get('all')
        return repo.status(match=scmutil.match(repo, pats, opts), clean=True,
                           unknown=showunknown)
    # distinguish "configured but unmatched" from "not configured at all"
    if ui.configitems('keyword'):
        raise util.Abort(_('[keyword] patterns cannot match'))
    raise util.Abort(_('no [keyword] patterns configured'))
334 334
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        status = _status(ui, repo, kwt, *pats, **opts)
        modified, added, removed, deleted, unknown, ignored, clean = status
        # only files without local changes may be rewritten in place
        if modified or added or removed or deleted:
            raise util.Abort(_('outstanding uncommitted changes'))
        kwt.overwrite(wctx, clean, True, expand)
    finally:
        wlock.release()
350 350
@command('kwdemo',
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '',
           _('read maps from rcfile'), _('FILE'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print one config section in hgrc syntax
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(ui, tmpdir, True)
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rclines = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            cfgfile = repo.opener('hgrc', 'w')
            cfgfile.writelines(rclines)
            cfgfile.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        # deliberate or-fallback: empty current maps mean the defaults apply
        kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)

    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    # disable any commit hooks that could interfere with the demo commit
    for name, cmd in ui.configitems('hooks'):
        if 'commit' in name.split('.', 1)[0]:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg)
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)
438 438
@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # third positional argument turns expansion on
    _kwfwrite(ui, repo, True, *pats, **opts)
449 449
@command('kwfiles',
         [('A', 'all', None, _('show keyword status flags of all files')),
          ('i', 'ignore', None, _('show files excluded from expansion')),
          ('u', 'unknown', None, _('only show unknown (not tracked) files')),
         ] + commands.walkopts,
         _('hg kwfiles [OPTION]... [FILE]...'))
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = kwtools['templater']
    status = _status(ui, repo, kwt, *pats, **opts)
    cwd = repo.getcwd() if pats else ''
    modified, added, removed, deleted, unknown, ignored, clean = status
    files = []
    if not opts.get('unknown') or opts.get('all'):
        files = sorted(modified + added + clean)
    wctx = repo[None]
    kwfiles = kwt.iskwfile(files, wctx)
    kwdeleted = kwt.iskwfile(deleted, wctx)
    kwunknown = kwt.iskwfile(unknown, wctx)
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwdeleted, kwunknown
    else:
        showfiles = [], [], []
    if opts.get('all') or opts.get('ignore'):
        # append the non-candidate (ignored) tracked and untracked files
        showfiles += ([f for f in files if f not in kwfiles],
                      [f for f in unknown if f not in kwunknown])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    kwstates = zip('K!kIi', showfiles, kwlabels)
    for char, filenames, kwstate in kwstates:
        fmt = '%s %%s\n' % char if opts.get('all') or ui.verbose else '%s\n'
        for f in filenames:
            ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
501 501
@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    '''
    # third positional argument turns expansion off
    _kwfwrite(ui, repo, False, *pats, **opts)
512 512
513 513
def uisetup(ui):
    ''' Monkeypatches dispatch._parse to retrieve user command.'''

    def kwdispatch_parse(orig, ui, args):
        '''Monkeypatch dispatch._parse to obtain running hg command.'''
        parsed = orig(ui, args)
        # remember the command name so reposetup/kwtemplater can consult it
        kwtools['hgcmd'] = parsed[0]
        return parsed

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
524 524
def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution.
    Overrides file method to return kwfilelog instead of filelog
    if file matches user configuration.
    Wraps commit to overwrite configured files with updated
    keyword substitutions.
    Monkeypatches patch and webcommands.'''

    try:
        # skip non-local repos, commands that never expand, repos nested
        # inside a .hg directory, and bundle repos
        if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
            or '.hg' in util.splitpath(repo.root)
            or repo._url.startswith('bundle:')):
            return
    except AttributeError:
        pass

    inc, exc = [], ['.hg*']
    for pat, opt in ui.configitems('keyword'):
        if opt != 'ignore':
            inc.append(pat)
        else:
            exc.append(pat)
    if not inc:
        return

    kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            return kwfilelog(self.sopener, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                return super(kwrepo, self).commit(*args, **opts)
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False):
            n = super(kwrepo, self).commitctx(ctx, error)
            # no lock needed, only called from repo.commit() which already locks
            if not kwt.record:
                restrict = kwt.restrict
                kwt.restrict = True
                kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
                              False, True)
                kwt.restrict = restrict
            return n

        def rollback(self, dryrun=False):
            wlock = self.wlock()
            try:
                if not dryrun:
                    changed = self['.'].files()
                ret = super(kwrepo, self).rollback(dryrun)
                if not dryrun:
                    # re-expand/shrink whatever the rolled-back ctx touched
                    ctx = self['.']
                    modified, added = _preselect(self[None].status(), changed)
                    kwt.overwrite(ctx, modified, True, True)
                    kwt.overwrite(ctx, added, True, False)
                return ret
            finally:
                wlock.release()

    # monkeypatches
    def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
        '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
        rejects or conflicts due to expanded keywords in working dir.'''
        orig(self, ui, gp, backend, store, eolmode)
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)

    def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
                opts=None, prefix=''):
        '''Monkeypatch patch.diff to avoid expansion.'''
        kwt.restrict = True
        return orig(repo, node1, node2, match, changes, opts, prefix)

    def kwweb_skip(orig, web, req, tmpl):
        '''Wraps webcommands.x turning off keyword expansion.'''
        kwt.match = util.never
        return orig(web, req, tmpl)

    def kw_copy(orig, ui, repo, pats, opts, rename=False):
        '''Wraps cmdutil.copy so that copy/rename destinations do not
        contain expanded keywords.
        Note that the source of a regular file destination may also be a
        symlink:
        hg cp sym x                -> x is symlink
        cp sym x; hg cp -A sym x   -> x is file (maybe expanded keywords)
        For the latter we have to follow the symlink to find out whether its
        target is configured for expansion and we therefore must unexpand the
        keywords in the destination.'''
        orig(ui, repo, pats, opts, rename)
        if opts.get('dry_run'):
            return
        wctx = repo[None]
        cwd = repo.getcwd()

        def haskwsource(dest):
            '''Returns true if dest is a regular file and configured for
            expansion or a symlink which points to a file configured for
            expansion. '''
            source = repo.dirstate.copied(dest)
            if 'l' in wctx.flags(source):
                source = scmutil.canonpath(repo.root, cwd,
                                           os.path.realpath(source))
            return kwt.match(source)

        candidates = [f for f in repo.dirstate.copies() if
                      'l' not in wctx.flags(f) and haskwsource(f)]
        kwt.overwrite(wctx, candidates, False, False)

    def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
        '''Wraps record.dorecord expanding keywords after recording.'''
        wlock = repo.wlock()
        try:
            # record returns 0 even when nothing has changed
            # therefore compare nodes before and after
            kwt.record = True
            ctx = repo['.']
            wstatus = repo[None].status()
            ret = orig(ui, repo, commitfunc, *pats, **opts)
            recctx = repo['.']
            if ctx != recctx:
                modified, added = _preselect(wstatus, recctx.files())
                kwt.restrict = False
                kwt.overwrite(recctx, modified, False, True)
                kwt.overwrite(recctx, added, False, True, True)
                kwt.restrict = True
            return ret
        finally:
            wlock.release()

    def kwfilectx_cmp(orig, self, fctx):
        # keyword affects data size, comparing wdir and filelog size does
        # not make sense
        if (fctx._filerev is None and
            (self._repo._encodefilterpats or
             kwt.match(fctx.path()) and 'l' not in fctx.flags()) or
            self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())
        return True

    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    extensions.wrapfunction(patch, 'diff', kw_diff)
    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
    for c in 'annotate changeset rev filediff diff'.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)
    for name in recordextensions.split():
        try:
            record = extensions.find(name)
            extensions.wrapfunction(record, 'dorecord', kw_dorecord)
        except KeyError:
            pass

    repo.__class__ = kwrepo
@@ -1,1781 +1,1783 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, errno, re
10 10 import tempfile, zlib, shutil
11 11
12 12 from i18n import _
13 13 from node import hex, nullid, short
14 14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15 15
16 16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 17
18 18 class PatchError(Exception):
19 19 pass
20 20
21 21
22 22 # public functions
23 23
24 24 def split(stream):
25 25 '''return an iterator of individual patches from a stream'''
26 26 def isheader(line, inheader):
27 27 if inheader and line[0] in (' ', '\t'):
28 28 # continuation
29 29 return True
30 30 if line[0] in (' ', '-', '+'):
31 31 # diff line - don't check for header pattern in there
32 32 return False
33 33 l = line.split(': ', 1)
34 34 return len(l) == 2 and ' ' not in l[0]
35 35
36 36 def chunk(lines):
37 37 return cStringIO.StringIO(''.join(lines))
38 38
39 39 def hgsplit(stream, cur):
40 40 inheader = True
41 41
42 42 for line in stream:
43 43 if not line.strip():
44 44 inheader = False
45 45 if not inheader and line.startswith('# HG changeset patch'):
46 46 yield chunk(cur)
47 47 cur = []
48 48 inheader = True
49 49
50 50 cur.append(line)
51 51
52 52 if cur:
53 53 yield chunk(cur)
54 54
55 55 def mboxsplit(stream, cur):
56 56 for line in stream:
57 57 if line.startswith('From '):
58 58 for c in split(chunk(cur[1:])):
59 59 yield c
60 60 cur = []
61 61
62 62 cur.append(line)
63 63
64 64 if cur:
65 65 for c in split(chunk(cur[1:])):
66 66 yield c
67 67
68 68 def mimesplit(stream, cur):
69 69 def msgfp(m):
70 70 fp = cStringIO.StringIO()
71 71 g = email.Generator.Generator(fp, mangle_from_=False)
72 72 g.flatten(m)
73 73 fp.seek(0)
74 74 return fp
75 75
76 76 for line in stream:
77 77 cur.append(line)
78 78 c = chunk(cur)
79 79
80 80 m = email.Parser.Parser().parse(c)
81 81 if not m.is_multipart():
82 82 yield msgfp(m)
83 83 else:
84 84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 85 for part in m.walk():
86 86 ct = part.get_content_type()
87 87 if ct not in ok_types:
88 88 continue
89 89 yield msgfp(part)
90 90
91 91 def headersplit(stream, cur):
92 92 inheader = False
93 93
94 94 for line in stream:
95 95 if not inheader and isheader(line, inheader):
96 96 yield chunk(cur)
97 97 cur = []
98 98 inheader = True
99 99 if inheader and not isheader(line, inheader):
100 100 inheader = False
101 101
102 102 cur.append(line)
103 103
104 104 if cur:
105 105 yield chunk(cur)
106 106
107 107 def remainder(cur):
108 108 yield chunk(cur)
109 109
110 110 class fiter(object):
111 111 def __init__(self, fp):
112 112 self.fp = fp
113 113
114 114 def __iter__(self):
115 115 return self
116 116
117 117 def next(self):
118 118 l = self.fp.readline()
119 119 if not l:
120 120 raise StopIteration
121 121 return l
122 122
123 123 inheader = False
124 124 cur = []
125 125
126 126 mimeheaders = ['content-type']
127 127
128 128 if not hasattr(stream, 'next'):
129 129 # http responses, for example, have readline but not next
130 130 stream = fiter(stream)
131 131
132 132 for line in stream:
133 133 cur.append(line)
134 134 if line.startswith('# HG changeset patch'):
135 135 return hgsplit(stream, cur)
136 136 elif line.startswith('From '):
137 137 return mboxsplit(stream, cur)
138 138 elif isheader(line, inheader):
139 139 inheader = True
140 140 if line.split(':', 1)[0].lower() in mimeheaders:
141 141 # let email parser handle this
142 142 return mimesplit(stream, cur)
143 143 elif line.startswith('--- ') and inheader:
144 144 # No evil headers seen by diff start, split by hand
145 145 return headersplit(stream, cur)
146 146 # Not enough info, keep reading
147 147
148 148 # if we are here, we have a very plain patch
149 149 return remainder(cur)
150 150
151 151 def extract(ui, fileobj):
152 152 '''extract patch from data read from fileobj.
153 153
154 154 patch can be a normal patch or contained in an email message.
155 155
156 156 return tuple (filename, message, user, date, branch, node, p1, p2).
157 157 Any item in the returned tuple can be None. If filename is None,
158 158 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 159
160 160 # attempt to detect the start of a patch
161 161 # (this heuristic is borrowed from quilt)
162 162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 164 r'---[ \t].*?^\+\+\+[ \t]|'
165 165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 166
167 167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 168 tmpfp = os.fdopen(fd, 'w')
169 169 try:
170 170 msg = email.Parser.Parser().parse(fileobj)
171 171
172 172 subject = msg['Subject']
173 173 user = msg['From']
174 174 if not subject and not user:
175 175 # Not an email, restore parsed headers if any
176 176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 177
178 178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 179 # should try to parse msg['Date']
180 180 date = None
181 181 nodeid = None
182 182 branch = None
183 183 parents = []
184 184
185 185 if subject:
186 186 if subject.startswith('[PATCH'):
187 187 pend = subject.find(']')
188 188 if pend >= 0:
189 189 subject = subject[pend + 1:].lstrip()
190 190 subject = subject.replace('\n\t', ' ')
191 191 ui.debug('Subject: %s\n' % subject)
192 192 if user:
193 193 ui.debug('From: %s\n' % user)
194 194 diffs_seen = 0
195 195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 196 message = ''
197 197 for part in msg.walk():
198 198 content_type = part.get_content_type()
199 199 ui.debug('Content-Type: %s\n' % content_type)
200 200 if content_type not in ok_types:
201 201 continue
202 202 payload = part.get_payload(decode=True)
203 203 m = diffre.search(payload)
204 204 if m:
205 205 hgpatch = False
206 206 hgpatchheader = False
207 207 ignoretext = False
208 208
209 209 ui.debug('found patch at byte %d\n' % m.start(0))
210 210 diffs_seen += 1
211 211 cfp = cStringIO.StringIO()
212 212 for line in payload[:m.start(0)].splitlines():
213 213 if line.startswith('# HG changeset patch') and not hgpatch:
214 214 ui.debug('patch generated by hg export\n')
215 215 hgpatch = True
216 216 hgpatchheader = True
217 217 # drop earlier commit message content
218 218 cfp.seek(0)
219 219 cfp.truncate()
220 220 subject = None
221 221 elif hgpatchheader:
222 222 if line.startswith('# User '):
223 223 user = line[7:]
224 224 ui.debug('From: %s\n' % user)
225 225 elif line.startswith("# Date "):
226 226 date = line[7:]
227 227 elif line.startswith("# Branch "):
228 228 branch = line[9:]
229 229 elif line.startswith("# Node ID "):
230 230 nodeid = line[10:]
231 231 elif line.startswith("# Parent "):
232 232 parents.append(line[10:])
233 233 elif not line.startswith("# "):
234 234 hgpatchheader = False
235 235 elif line == '---' and gitsendmail:
236 236 ignoretext = True
237 237 if not hgpatchheader and not ignoretext:
238 238 cfp.write(line)
239 239 cfp.write('\n')
240 240 message = cfp.getvalue()
241 241 if tmpfp:
242 242 tmpfp.write(payload)
243 243 if not payload.endswith('\n'):
244 244 tmpfp.write('\n')
245 245 elif not diffs_seen and message and content_type == 'text/plain':
246 246 message += '\n' + payload
247 247 except:
248 248 tmpfp.close()
249 249 os.unlink(tmpname)
250 250 raise
251 251
252 252 if subject and not message.startswith(subject):
253 253 message = '%s\n%s' % (subject, message)
254 254 tmpfp.close()
255 255 if not diffs_seen:
256 256 os.unlink(tmpname)
257 257 return None, message, user, date, branch, None, None, None
258 258 p1 = parents and parents.pop(0) or None
259 259 p2 = parents and parents.pop(0) or None
260 260 return tmpname, message, user, date, branch, nodeid, p1, p2
261 261
262 262 class patchmeta(object):
263 263 """Patched file metadata
264 264
265 265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 266 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 267 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 268 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 269 'islink' is True if the file is a symlink and 'isexec' is True if
270 270 the file is executable. Otherwise, 'mode' is None.
271 271 """
272 272 def __init__(self, path):
273 273 self.path = path
274 274 self.oldpath = None
275 275 self.mode = None
276 276 self.op = 'MODIFY'
277 277 self.binary = False
278 278
279 279 def setmode(self, mode):
280 280 islink = mode & 020000
281 281 isexec = mode & 0100
282 282 self.mode = (islink, isexec)
283 283
284 def copy(self):
285 other = patchmeta(self.path)
286 other.oldpath = self.oldpath
287 other.mode = self.mode
288 other.op = self.op
289 other.binary = self.binary
290 return other
291
284 292 def __repr__(self):
285 293 return "<patchmeta %s %r>" % (self.op, self.path)
286 294
287 295 def readgitpatch(lr):
288 296 """extract git-style metadata about patches from <patchname>"""
289 297
290 298 # Filter patch for git information
291 299 gp = None
292 300 gitpatches = []
293 301 for line in lr:
294 302 line = line.rstrip(' \r\n')
295 303 if line.startswith('diff --git'):
296 304 m = gitre.match(line)
297 305 if m:
298 306 if gp:
299 307 gitpatches.append(gp)
300 308 dst = m.group(2)
301 309 gp = patchmeta(dst)
302 310 elif gp:
303 311 if line.startswith('--- '):
304 312 gitpatches.append(gp)
305 313 gp = None
306 314 continue
307 315 if line.startswith('rename from '):
308 316 gp.op = 'RENAME'
309 317 gp.oldpath = line[12:]
310 318 elif line.startswith('rename to '):
311 319 gp.path = line[10:]
312 320 elif line.startswith('copy from '):
313 321 gp.op = 'COPY'
314 322 gp.oldpath = line[10:]
315 323 elif line.startswith('copy to '):
316 324 gp.path = line[8:]
317 325 elif line.startswith('deleted file'):
318 326 gp.op = 'DELETE'
319 327 elif line.startswith('new file mode '):
320 328 gp.op = 'ADD'
321 329 gp.setmode(int(line[-6:], 8))
322 330 elif line.startswith('new mode '):
323 331 gp.setmode(int(line[-6:], 8))
324 332 elif line.startswith('GIT binary patch'):
325 333 gp.binary = True
326 334 if gp:
327 335 gitpatches.append(gp)
328 336
329 337 return gitpatches
330 338
331 339 class linereader(object):
332 340 # simple class to allow pushing lines back into the input stream
333 341 def __init__(self, fp):
334 342 self.fp = fp
335 343 self.buf = []
336 344
337 345 def push(self, line):
338 346 if line is not None:
339 347 self.buf.append(line)
340 348
341 349 def readline(self):
342 350 if self.buf:
343 351 l = self.buf[0]
344 352 del self.buf[0]
345 353 return l
346 354 return self.fp.readline()
347 355
348 356 def __iter__(self):
349 357 while True:
350 358 l = self.readline()
351 359 if not l:
352 360 break
353 361 yield l
354 362
355 363 class abstractbackend(object):
356 364 def __init__(self, ui):
357 365 self.ui = ui
358 366
359 367 def getfile(self, fname):
360 368 """Return target file data and flags as a (data, (islink,
361 369 isexec)) tuple.
362 370 """
363 371 raise NotImplementedError
364 372
365 373 def setfile(self, fname, data, mode, copysource):
366 374 """Write data to target file fname and set its mode. mode is a
367 375 (islink, isexec) tuple. If data is None, the file content should
368 376 be left unchanged. If the file is modified after being copied,
369 377 copysource is set to the original file name.
370 378 """
371 379 raise NotImplementedError
372 380
373 381 def unlink(self, fname):
374 382 """Unlink target file."""
375 383 raise NotImplementedError
376 384
377 385 def writerej(self, fname, failed, total, lines):
378 386 """Write rejected lines for fname. total is the number of hunks
379 387 which failed to apply and total the total number of hunks for this
380 388 files.
381 389 """
382 390 pass
383 391
384 392 def exists(self, fname):
385 393 raise NotImplementedError
386 394
387 395 class fsbackend(abstractbackend):
388 396 def __init__(self, ui, basedir):
389 397 super(fsbackend, self).__init__(ui)
390 398 self.opener = scmutil.opener(basedir)
391 399
392 400 def _join(self, f):
393 401 return os.path.join(self.opener.base, f)
394 402
395 403 def getfile(self, fname):
396 404 path = self._join(fname)
397 405 if os.path.islink(path):
398 406 return (os.readlink(path), (True, False))
399 407 isexec = False
400 408 try:
401 409 isexec = os.lstat(path).st_mode & 0100 != 0
402 410 except OSError, e:
403 411 if e.errno != errno.ENOENT:
404 412 raise
405 413 return (self.opener.read(fname), (False, isexec))
406 414
407 415 def setfile(self, fname, data, mode, copysource):
408 416 islink, isexec = mode
409 417 if data is None:
410 418 util.setflags(self._join(fname), islink, isexec)
411 419 return
412 420 if islink:
413 421 self.opener.symlink(data, fname)
414 422 else:
415 423 self.opener.write(fname, data)
416 424 if isexec:
417 425 util.setflags(self._join(fname), False, True)
418 426
419 427 def unlink(self, fname):
420 428 try:
421 429 util.unlinkpath(self._join(fname))
422 430 except OSError, inst:
423 431 if inst.errno != errno.ENOENT:
424 432 raise
425 433
426 434 def writerej(self, fname, failed, total, lines):
427 435 fname = fname + ".rej"
428 436 self.ui.warn(
429 437 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
430 438 (failed, total, fname))
431 439 fp = self.opener(fname, 'w')
432 440 fp.writelines(lines)
433 441 fp.close()
434 442
435 443 def exists(self, fname):
436 444 return os.path.lexists(self._join(fname))
437 445
438 446 class workingbackend(fsbackend):
439 447 def __init__(self, ui, repo, similarity):
440 448 super(workingbackend, self).__init__(ui, repo.root)
441 449 self.repo = repo
442 450 self.similarity = similarity
443 451 self.removed = set()
444 452 self.changed = set()
445 453 self.copied = []
446 454
447 455 def _checkknown(self, fname):
448 456 if self.repo.dirstate[fname] == '?' and self.exists(fname):
449 457 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
450 458
451 459 def setfile(self, fname, data, mode, copysource):
452 460 self._checkknown(fname)
453 461 super(workingbackend, self).setfile(fname, data, mode, copysource)
454 462 if copysource is not None:
455 463 self.copied.append((copysource, fname))
456 464 self.changed.add(fname)
457 465
458 466 def unlink(self, fname):
459 467 self._checkknown(fname)
460 468 super(workingbackend, self).unlink(fname)
461 469 self.removed.add(fname)
462 470 self.changed.add(fname)
463 471
464 472 def close(self):
465 473 wctx = self.repo[None]
466 474 addremoved = set(self.changed)
467 475 for src, dst in self.copied:
468 476 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
469 477 addremoved.discard(src)
470 478 if (not self.similarity) and self.removed:
471 479 wctx.forget(sorted(self.removed))
472 480 if addremoved:
473 481 cwd = self.repo.getcwd()
474 482 if cwd:
475 483 addremoved = [util.pathto(self.repo.root, cwd, f)
476 484 for f in addremoved]
477 485 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
478 486 return sorted(self.changed)
479 487
480 488 class filestore(object):
481 489 def __init__(self):
482 490 self.opener = None
483 491 self.files = {}
484 492 self.created = 0
485 493
486 494 def setfile(self, fname, data, mode):
487 495 if self.opener is None:
488 496 root = tempfile.mkdtemp(prefix='hg-patch-')
489 497 self.opener = scmutil.opener(root)
490 498 # Avoid filename issues with these simple names
491 499 fn = str(self.created)
492 500 self.opener.write(fn, data)
493 501 self.created += 1
494 502 self.files[fname] = (fn, mode)
495 503
496 504 def getfile(self, fname):
497 505 if fname not in self.files:
498 506 raise IOError()
499 507 fn, mode = self.files[fname]
500 508 return self.opener.read(fn), mode
501 509
502 510 def close(self):
503 511 if self.opener:
504 512 shutil.rmtree(self.opener.base)
505 513
506 514 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
507 515 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
508 516 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
509 517 eolmodes = ['strict', 'crlf', 'lf', 'auto']
510 518
511 519 class patchfile(object):
512 def __init__(self, ui, fname, backend, store, mode, create, remove,
513 eolmode='strict', copysource=None):
514 self.fname = fname
520 def __init__(self, ui, gp, backend, store, eolmode='strict'):
521 self.fname = gp.path
515 522 self.eolmode = eolmode
516 523 self.eol = None
517 524 self.backend = backend
518 525 self.ui = ui
519 526 self.lines = []
520 527 self.exists = False
521 528 self.missing = True
522 self.mode = mode
523 self.copysource = copysource
524 self.create = create
525 self.remove = remove
529 self.mode = gp.mode
530 self.copysource = gp.oldpath
531 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
532 self.remove = gp.op == 'DELETE'
526 533 try:
527 if copysource is None:
528 data, mode = backend.getfile(fname)
534 if self.copysource is None:
535 data, mode = backend.getfile(self.fname)
529 536 self.exists = True
530 537 else:
531 data, mode = store.getfile(copysource)
532 self.exists = backend.exists(fname)
538 data, mode = store.getfile(self.copysource)
539 self.exists = backend.exists(self.fname)
533 540 self.missing = False
534 541 if data:
535 542 self.lines = data.splitlines(True)
536 543 if self.mode is None:
537 544 self.mode = mode
538 545 if self.lines:
539 546 # Normalize line endings
540 547 if self.lines[0].endswith('\r\n'):
541 548 self.eol = '\r\n'
542 549 elif self.lines[0].endswith('\n'):
543 550 self.eol = '\n'
544 551 if eolmode != 'strict':
545 552 nlines = []
546 553 for l in self.lines:
547 554 if l.endswith('\r\n'):
548 555 l = l[:-2] + '\n'
549 556 nlines.append(l)
550 557 self.lines = nlines
551 558 except IOError:
552 if create:
559 if self.create:
553 560 self.missing = False
554 561 if self.mode is None:
555 562 self.mode = (False, False)
556 563 if self.missing:
557 564 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
558 565
559 566 self.hash = {}
560 567 self.dirty = 0
561 568 self.offset = 0
562 569 self.skew = 0
563 570 self.rej = []
564 571 self.fileprinted = False
565 572 self.printfile(False)
566 573 self.hunks = 0
567 574
568 575 def writelines(self, fname, lines, mode):
569 576 if self.eolmode == 'auto':
570 577 eol = self.eol
571 578 elif self.eolmode == 'crlf':
572 579 eol = '\r\n'
573 580 else:
574 581 eol = '\n'
575 582
576 583 if self.eolmode != 'strict' and eol and eol != '\n':
577 584 rawlines = []
578 585 for l in lines:
579 586 if l and l[-1] == '\n':
580 587 l = l[:-1] + eol
581 588 rawlines.append(l)
582 589 lines = rawlines
583 590
584 591 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
585 592
586 593 def printfile(self, warn):
587 594 if self.fileprinted:
588 595 return
589 596 if warn or self.ui.verbose:
590 597 self.fileprinted = True
591 598 s = _("patching file %s\n") % self.fname
592 599 if warn:
593 600 self.ui.warn(s)
594 601 else:
595 602 self.ui.note(s)
596 603
597 604
598 605 def findlines(self, l, linenum):
599 606 # looks through the hash and finds candidate lines. The
600 607 # result is a list of line numbers sorted based on distance
601 608 # from linenum
602 609
603 610 cand = self.hash.get(l, [])
604 611 if len(cand) > 1:
605 612 # resort our list of potentials forward then back.
606 613 cand.sort(key=lambda x: abs(x - linenum))
607 614 return cand
608 615
609 616 def write_rej(self):
610 617 # our rejects are a little different from patch(1). This always
611 618 # creates rejects in the same form as the original patch. A file
612 619 # header is inserted so that you can run the reject through patch again
613 620 # without having to type the filename.
614 621 if not self.rej:
615 622 return
616 623 base = os.path.basename(self.fname)
617 624 lines = ["--- %s\n+++ %s\n" % (base, base)]
618 625 for x in self.rej:
619 626 for l in x.hunk:
620 627 lines.append(l)
621 628 if l[-1] != '\n':
622 629 lines.append("\n\ No newline at end of file\n")
623 630 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
624 631
625 632 def apply(self, h):
626 633 if not h.complete():
627 634 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
628 635 (h.number, h.desc, len(h.a), h.lena, len(h.b),
629 636 h.lenb))
630 637
631 638 self.hunks += 1
632 639
633 640 if self.missing:
634 641 self.rej.append(h)
635 642 return -1
636 643
637 644 if self.exists and self.create:
638 645 if self.copysource:
639 646 self.ui.warn(_("cannot create %s: destination already "
640 647 "exists\n" % self.fname))
641 648 else:
642 649 self.ui.warn(_("file %s already exists\n") % self.fname)
643 650 self.rej.append(h)
644 651 return -1
645 652
646 653 if isinstance(h, binhunk):
647 654 if self.remove:
648 655 self.backend.unlink(self.fname)
649 656 else:
650 657 self.lines[:] = h.new()
651 658 self.offset += len(h.new())
652 659 self.dirty = True
653 660 return 0
654 661
655 662 horig = h
656 663 if (self.eolmode in ('crlf', 'lf')
657 664 or self.eolmode == 'auto' and self.eol):
658 665 # If new eols are going to be normalized, then normalize
659 666 # hunk data before patching. Otherwise, preserve input
660 667 # line-endings.
661 668 h = h.getnormalized()
662 669
663 670 # fast case first, no offsets, no fuzz
664 671 old = h.old()
665 672 # patch starts counting at 1 unless we are adding the file
666 673 if h.starta == 0:
667 674 start = 0
668 675 else:
669 676 start = h.starta + self.offset - 1
670 677 orig_start = start
671 678 # if there's skew we want to emit the "(offset %d lines)" even
672 679 # when the hunk cleanly applies at start + skew, so skip the
673 680 # fast case code
674 681 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
675 682 if self.remove:
676 683 self.backend.unlink(self.fname)
677 684 else:
678 685 self.lines[start : start + h.lena] = h.new()
679 686 self.offset += h.lenb - h.lena
680 687 self.dirty = True
681 688 return 0
682 689
683 690 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
684 691 self.hash = {}
685 692 for x, s in enumerate(self.lines):
686 693 self.hash.setdefault(s, []).append(x)
687 694 if h.hunk[-1][0] != ' ':
688 695 # if the hunk tried to put something at the bottom of the file
689 696 # override the start line and use eof here
690 697 search_start = len(self.lines)
691 698 else:
692 699 search_start = orig_start + self.skew
693 700
694 701 for fuzzlen in xrange(3):
695 702 for toponly in [True, False]:
696 703 old = h.old(fuzzlen, toponly)
697 704
698 705 cand = self.findlines(old[0][1:], search_start)
699 706 for l in cand:
700 707 if diffhelpers.testhunk(old, self.lines, l) == 0:
701 708 newlines = h.new(fuzzlen, toponly)
702 709 self.lines[l : l + len(old)] = newlines
703 710 self.offset += len(newlines) - len(old)
704 711 self.skew = l - orig_start
705 712 self.dirty = True
706 713 offset = l - orig_start - fuzzlen
707 714 if fuzzlen:
708 715 msg = _("Hunk #%d succeeded at %d "
709 716 "with fuzz %d "
710 717 "(offset %d lines).\n")
711 718 self.printfile(True)
712 719 self.ui.warn(msg %
713 720 (h.number, l + 1, fuzzlen, offset))
714 721 else:
715 722 msg = _("Hunk #%d succeeded at %d "
716 723 "(offset %d lines).\n")
717 724 self.ui.note(msg % (h.number, l + 1, offset))
718 725 return fuzzlen
719 726 self.printfile(True)
720 727 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
721 728 self.rej.append(horig)
722 729 return -1
723 730
724 731 def close(self):
725 732 if self.dirty:
726 733 self.writelines(self.fname, self.lines, self.mode)
727 734 self.write_rej()
728 735 return len(self.rej)
729 736
730 737 class hunk(object):
731 738 def __init__(self, desc, num, lr, context):
732 739 self.number = num
733 740 self.desc = desc
734 741 self.hunk = [desc]
735 742 self.a = []
736 743 self.b = []
737 744 self.starta = self.lena = None
738 745 self.startb = self.lenb = None
739 746 if lr is not None:
740 747 if context:
741 748 self.read_context_hunk(lr)
742 749 else:
743 750 self.read_unified_hunk(lr)
744 751
745 752 def getnormalized(self):
746 753 """Return a copy with line endings normalized to LF."""
747 754
748 755 def normalize(lines):
749 756 nlines = []
750 757 for line in lines:
751 758 if line.endswith('\r\n'):
752 759 line = line[:-2] + '\n'
753 760 nlines.append(line)
754 761 return nlines
755 762
756 763 # Dummy object, it is rebuilt manually
757 764 nh = hunk(self.desc, self.number, None, None)
758 765 nh.number = self.number
759 766 nh.desc = self.desc
760 767 nh.hunk = self.hunk
761 768 nh.a = normalize(self.a)
762 769 nh.b = normalize(self.b)
763 770 nh.starta = self.starta
764 771 nh.startb = self.startb
765 772 nh.lena = self.lena
766 773 nh.lenb = self.lenb
767 774 return nh
768 775
769 776 def read_unified_hunk(self, lr):
770 777 m = unidesc.match(self.desc)
771 778 if not m:
772 779 raise PatchError(_("bad hunk #%d") % self.number)
773 780 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
774 781 if self.lena is None:
775 782 self.lena = 1
776 783 else:
777 784 self.lena = int(self.lena)
778 785 if self.lenb is None:
779 786 self.lenb = 1
780 787 else:
781 788 self.lenb = int(self.lenb)
782 789 self.starta = int(self.starta)
783 790 self.startb = int(self.startb)
784 791 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
785 792 # if we hit eof before finishing out the hunk, the last line will
786 793 # be zero length. Lets try to fix it up.
787 794 while len(self.hunk[-1]) == 0:
788 795 del self.hunk[-1]
789 796 del self.a[-1]
790 797 del self.b[-1]
791 798 self.lena -= 1
792 799 self.lenb -= 1
793 800 self._fixnewline(lr)
794 801
795 802 def read_context_hunk(self, lr):
796 803 self.desc = lr.readline()
797 804 m = contextdesc.match(self.desc)
798 805 if not m:
799 806 raise PatchError(_("bad hunk #%d") % self.number)
800 807 foo, self.starta, foo2, aend, foo3 = m.groups()
801 808 self.starta = int(self.starta)
802 809 if aend is None:
803 810 aend = self.starta
804 811 self.lena = int(aend) - self.starta
805 812 if self.starta:
806 813 self.lena += 1
807 814 for x in xrange(self.lena):
808 815 l = lr.readline()
809 816 if l.startswith('---'):
810 817 # lines addition, old block is empty
811 818 lr.push(l)
812 819 break
813 820 s = l[2:]
814 821 if l.startswith('- ') or l.startswith('! '):
815 822 u = '-' + s
816 823 elif l.startswith(' '):
817 824 u = ' ' + s
818 825 else:
819 826 raise PatchError(_("bad hunk #%d old text line %d") %
820 827 (self.number, x))
821 828 self.a.append(u)
822 829 self.hunk.append(u)
823 830
824 831 l = lr.readline()
825 832 if l.startswith('\ '):
826 833 s = self.a[-1][:-1]
827 834 self.a[-1] = s
828 835 self.hunk[-1] = s
829 836 l = lr.readline()
830 837 m = contextdesc.match(l)
831 838 if not m:
832 839 raise PatchError(_("bad hunk #%d") % self.number)
833 840 foo, self.startb, foo2, bend, foo3 = m.groups()
834 841 self.startb = int(self.startb)
835 842 if bend is None:
836 843 bend = self.startb
837 844 self.lenb = int(bend) - self.startb
838 845 if self.startb:
839 846 self.lenb += 1
840 847 hunki = 1
841 848 for x in xrange(self.lenb):
842 849 l = lr.readline()
843 850 if l.startswith('\ '):
844 851 # XXX: the only way to hit this is with an invalid line range.
845 852 # The no-eol marker is not counted in the line range, but I
846 853 # guess there are diff(1) out there which behave differently.
847 854 s = self.b[-1][:-1]
848 855 self.b[-1] = s
849 856 self.hunk[hunki - 1] = s
850 857 continue
851 858 if not l:
852 859 # line deletions, new block is empty and we hit EOF
853 860 lr.push(l)
854 861 break
855 862 s = l[2:]
856 863 if l.startswith('+ ') or l.startswith('! '):
857 864 u = '+' + s
858 865 elif l.startswith(' '):
859 866 u = ' ' + s
860 867 elif len(self.b) == 0:
861 868 # line deletions, new block is empty
862 869 lr.push(l)
863 870 break
864 871 else:
865 872 raise PatchError(_("bad hunk #%d old text line %d") %
866 873 (self.number, x))
867 874 self.b.append(s)
868 875 while True:
869 876 if hunki >= len(self.hunk):
870 877 h = ""
871 878 else:
872 879 h = self.hunk[hunki]
873 880 hunki += 1
874 881 if h == u:
875 882 break
876 883 elif h.startswith('-'):
877 884 continue
878 885 else:
879 886 self.hunk.insert(hunki - 1, u)
880 887 break
881 888
882 889 if not self.a:
883 890 # this happens when lines were only added to the hunk
884 891 for x in self.hunk:
885 892 if x.startswith('-') or x.startswith(' '):
886 893 self.a.append(x)
887 894 if not self.b:
888 895 # this happens when lines were only deleted from the hunk
889 896 for x in self.hunk:
890 897 if x.startswith('+') or x.startswith(' '):
891 898 self.b.append(x[1:])
892 899 # @@ -start,len +start,len @@
893 900 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
894 901 self.startb, self.lenb)
895 902 self.hunk[0] = self.desc
896 903 self._fixnewline(lr)
897 904
898 905 def _fixnewline(self, lr):
899 906 l = lr.readline()
900 907 if l.startswith('\ '):
901 908 diffhelpers.fix_newline(self.hunk, self.a, self.b)
902 909 else:
903 910 lr.push(l)
904 911
905 912 def complete(self):
906 913 return len(self.a) == self.lena and len(self.b) == self.lenb
907 914
908 915 def fuzzit(self, l, fuzz, toponly):
909 916 # this removes context lines from the top and bottom of list 'l'. It
910 917 # checks the hunk to make sure only context lines are removed, and then
911 918 # returns a new shortened list of lines.
912 919 fuzz = min(fuzz, len(l)-1)
913 920 if fuzz:
914 921 top = 0
915 922 bot = 0
916 923 hlen = len(self.hunk)
917 924 for x in xrange(hlen - 1):
918 925 # the hunk starts with the @@ line, so use x+1
919 926 if self.hunk[x + 1][0] == ' ':
920 927 top += 1
921 928 else:
922 929 break
923 930 if not toponly:
924 931 for x in xrange(hlen - 1):
925 932 if self.hunk[hlen - bot - 1][0] == ' ':
926 933 bot += 1
927 934 else:
928 935 break
929 936
930 937 # top and bot now count context in the hunk
931 938 # adjust them if either one is short
932 939 context = max(top, bot, 3)
933 940 if bot < context:
934 941 bot = max(0, fuzz - (context - bot))
935 942 else:
936 943 bot = min(fuzz, bot)
937 944 if top < context:
938 945 top = max(0, fuzz - (context - top))
939 946 else:
940 947 top = min(fuzz, top)
941 948
942 949 return l[top:len(l)-bot]
943 950 return l
944 951
945 952 def old(self, fuzz=0, toponly=False):
946 953 return self.fuzzit(self.a, fuzz, toponly)
947 954
948 955 def new(self, fuzz=0, toponly=False):
949 956 return self.fuzzit(self.b, fuzz, toponly)
950 957
951 958 class binhunk:
952 959 'A binary patch file. Only understands literals so far.'
953 960 def __init__(self, lr):
954 961 self.text = None
955 962 self.hunk = ['GIT binary patch\n']
956 963 self._read(lr)
957 964
958 965 def complete(self):
959 966 return self.text is not None
960 967
961 968 def new(self):
962 969 return [self.text]
963 970
964 971 def _read(self, lr):
965 972 line = lr.readline()
966 973 self.hunk.append(line)
967 974 while line and not line.startswith('literal '):
968 975 line = lr.readline()
969 976 self.hunk.append(line)
970 977 if not line:
971 978 raise PatchError(_('could not extract binary patch'))
972 979 size = int(line[8:].rstrip())
973 980 dec = []
974 981 line = lr.readline()
975 982 self.hunk.append(line)
976 983 while len(line) > 1:
977 984 l = line[0]
978 985 if l <= 'Z' and l >= 'A':
979 986 l = ord(l) - ord('A') + 1
980 987 else:
981 988 l = ord(l) - ord('a') + 27
982 989 dec.append(base85.b85decode(line[1:-1])[:l])
983 990 line = lr.readline()
984 991 self.hunk.append(line)
985 992 text = zlib.decompress(''.join(dec))
986 993 if len(text) != size:
987 994 raise PatchError(_('binary patch is %d bytes, not %d') %
988 995 len(text), size)
989 996 self.text = text
990 997
991 998 def parsefilename(str):
992 999 # --- filename \t|space stuff
993 1000 s = str[4:].rstrip('\r\n')
994 1001 i = s.find('\t')
995 1002 if i < 0:
996 1003 i = s.find(' ')
997 1004 if i < 0:
998 1005 return s
999 1006 return s[:i]
1000 1007
1001 1008 def pathstrip(path, strip):
1002 1009 pathlen = len(path)
1003 1010 i = 0
1004 1011 if strip == 0:
1005 1012 return '', path.rstrip()
1006 1013 count = strip
1007 1014 while count > 0:
1008 1015 i = path.find('/', i)
1009 1016 if i == -1:
1010 1017 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1011 1018 (count, strip, path))
1012 1019 i += 1
1013 1020 # consume '//' in the path
1014 1021 while i < pathlen - 1 and path[i] == '/':
1015 1022 i += 1
1016 1023 count -= 1
1017 1024 return path[:i].lstrip(), path[i:].rstrip()
1018 1025
1019 def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
1020 if gp:
1021 # Git patches do not play games. Excluding copies from the
1022 # following heuristic avoids a lot of confusion
1023 fname = pathstrip(gp.path, strip - 1)[1]
1024 create = gp.op in ('ADD', 'COPY', 'RENAME')
1025 remove = gp.op == 'DELETE'
1026 return fname, create, remove
1026 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1027 1027 nulla = afile_orig == "/dev/null"
1028 1028 nullb = bfile_orig == "/dev/null"
1029 1029 create = nulla and hunk.starta == 0 and hunk.lena == 0
1030 1030 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1031 1031 abase, afile = pathstrip(afile_orig, strip)
1032 1032 gooda = not nulla and backend.exists(afile)
1033 1033 bbase, bfile = pathstrip(bfile_orig, strip)
1034 1034 if afile == bfile:
1035 1035 goodb = gooda
1036 1036 else:
1037 1037 goodb = not nullb and backend.exists(bfile)
1038 1038 missing = not goodb and not gooda and not create
1039 1039
1040 1040 # some diff programs apparently produce patches where the afile is
1041 1041 # not /dev/null, but afile starts with bfile
1042 1042 abasedir = afile[:afile.rfind('/') + 1]
1043 1043 bbasedir = bfile[:bfile.rfind('/') + 1]
1044 1044 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1045 1045 and hunk.starta == 0 and hunk.lena == 0):
1046 1046 create = True
1047 1047 missing = False
1048 1048
1049 1049 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1050 1050 # diff is between a file and its backup. In this case, the original
1051 1051 # file should be patched (see original mpatch code).
1052 1052 isbackup = (abase == bbase and bfile.startswith(afile))
1053 1053 fname = None
1054 1054 if not missing:
1055 1055 if gooda and goodb:
1056 1056 fname = isbackup and afile or bfile
1057 1057 elif gooda:
1058 1058 fname = afile
1059 1059
1060 1060 if not fname:
1061 1061 if not nullb:
1062 1062 fname = isbackup and afile or bfile
1063 1063 elif not nulla:
1064 1064 fname = afile
1065 1065 else:
1066 1066 raise PatchError(_("undefined source and destination files"))
1067 1067
1068 return fname, create, remove
1068 gp = patchmeta(fname)
1069 if create:
1070 gp.op = 'ADD'
1071 elif remove:
1072 gp.op = 'DELETE'
1073 return gp
1069 1074
1070 1075 def scangitpatch(lr, firstline):
1071 1076 """
1072 1077 Git patches can emit:
1073 1078 - rename a to b
1074 1079 - change b
1075 1080 - copy a to c
1076 1081 - change c
1077 1082
1078 1083 We cannot apply this sequence as-is, the renamed 'a' could not be
1079 1084 found for it would have been renamed already. And we cannot copy
1080 1085 from 'b' instead because 'b' would have been changed already. So
1081 1086 we scan the git patch for copy and rename commands so we can
1082 1087 perform the copies ahead of time.
1083 1088 """
1084 1089 pos = 0
1085 1090 try:
1086 1091 pos = lr.fp.tell()
1087 1092 fp = lr.fp
1088 1093 except IOError:
1089 1094 fp = cStringIO.StringIO(lr.fp.read())
1090 1095 gitlr = linereader(fp)
1091 1096 gitlr.push(firstline)
1092 1097 gitpatches = readgitpatch(gitlr)
1093 1098 fp.seek(pos)
1094 1099 return gitpatches
1095 1100
1096 1101 def iterhunks(fp):
1097 1102 """Read a patch and yield the following events:
1098 1103 - ("file", afile, bfile, firsthunk): select a new target file.
1099 1104 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1100 1105 "file" event.
1101 1106 - ("git", gitchanges): current diff is in git format, gitchanges
1102 1107 maps filenames to gitpatch records. Unique event.
1103 1108 """
1104 1109 afile = ""
1105 1110 bfile = ""
1106 1111 state = None
1107 1112 hunknum = 0
1108 1113 emitfile = newfile = False
1109 1114 gitpatches = None
1110 1115
1111 1116 # our states
1112 1117 BFILE = 1
1113 1118 context = None
1114 1119 lr = linereader(fp)
1115 1120
1116 1121 while True:
1117 1122 x = lr.readline()
1118 1123 if not x:
1119 1124 break
1120 1125 if state == BFILE and (
1121 1126 (not context and x[0] == '@')
1122 1127 or (context is not False and x.startswith('***************'))
1123 1128 or x.startswith('GIT binary patch')):
1124 1129 gp = None
1125 1130 if (gitpatches and
1126 1131 (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
1127 1132 gp = gitpatches.pop()[2]
1128 1133 if x.startswith('GIT binary patch'):
1129 1134 h = binhunk(lr)
1130 1135 else:
1131 1136 if context is None and x.startswith('***************'):
1132 1137 context = True
1133 1138 h = hunk(x, hunknum + 1, lr, context)
1134 1139 hunknum += 1
1135 1140 if emitfile:
1136 1141 emitfile = False
1137 yield 'file', (afile, bfile, h, gp)
1142 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1138 1143 yield 'hunk', h
1139 1144 elif x.startswith('diff --git'):
1140 1145 m = gitre.match(x)
1141 1146 if not m:
1142 1147 continue
1143 1148 if gitpatches is None:
1144 1149 # scan whole input for git metadata
1145 1150 gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
1146 1151 in scangitpatch(lr, x)]
1147 yield 'git', [g[2] for g in gitpatches
1152 yield 'git', [g[2].copy() for g in gitpatches
1148 1153 if g[2].op in ('COPY', 'RENAME')]
1149 1154 gitpatches.reverse()
1150 1155 afile = 'a/' + m.group(1)
1151 1156 bfile = 'b/' + m.group(2)
1152 1157 while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
1153 1158 gp = gitpatches.pop()[2]
1154 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1159 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1155 1160 gp = gitpatches[-1][2]
1156 1161 # copy/rename + modify should modify target, not source
1157 1162 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1158 1163 afile = bfile
1159 1164 newfile = True
1160 1165 elif x.startswith('---'):
1161 1166 # check for a unified diff
1162 1167 l2 = lr.readline()
1163 1168 if not l2.startswith('+++'):
1164 1169 lr.push(l2)
1165 1170 continue
1166 1171 newfile = True
1167 1172 context = False
1168 1173 afile = parsefilename(x)
1169 1174 bfile = parsefilename(l2)
1170 1175 elif x.startswith('***'):
1171 1176 # check for a context diff
1172 1177 l2 = lr.readline()
1173 1178 if not l2.startswith('---'):
1174 1179 lr.push(l2)
1175 1180 continue
1176 1181 l3 = lr.readline()
1177 1182 lr.push(l3)
1178 1183 if not l3.startswith("***************"):
1179 1184 lr.push(l2)
1180 1185 continue
1181 1186 newfile = True
1182 1187 context = True
1183 1188 afile = parsefilename(x)
1184 1189 bfile = parsefilename(l2)
1185 1190
1186 1191 if newfile:
1187 1192 newfile = False
1188 1193 emitfile = True
1189 1194 state = BFILE
1190 1195 hunknum = 0
1191 1196
1192 1197 while gitpatches:
1193 1198 gp = gitpatches.pop()[2]
1194 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1199 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1195 1200
1196 1201 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1197 1202 """Reads a patch from fp and tries to apply it.
1198 1203
1199 1204 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1200 1205 there was any fuzz.
1201 1206
1202 1207 If 'eolmode' is 'strict', the patch content and patched file are
1203 1208 read in binary mode. Otherwise, line endings are ignored when
1204 1209 patching then normalized according to 'eolmode'.
1205 1210 """
1206 1211 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1207 1212 eolmode=eolmode)
1208 1213
1209 1214 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1210 1215 eolmode='strict'):
1211 1216
1212 1217 def pstrip(p):
1213 1218 return pathstrip(p, strip - 1)[1]
1214 1219
1215 1220 rejects = 0
1216 1221 err = 0
1217 1222 current_file = None
1218 1223
1219 1224 for state, values in iterhunks(fp):
1220 1225 if state == 'hunk':
1221 1226 if not current_file:
1222 1227 continue
1223 1228 ret = current_file.apply(values)
1224 1229 if ret > 0:
1225 1230 err = 1
1226 1231 elif state == 'file':
1227 1232 if current_file:
1228 1233 rejects += current_file.close()
1229 1234 current_file = None
1230 1235 afile, bfile, first_hunk, gp = values
1231 copysource = None
1232 1236 if gp:
1233 1237 path = pstrip(gp.path)
1238 gp.path = pstrip(gp.path)
1234 1239 if gp.oldpath:
1235 copysource = pstrip(gp.oldpath)
1236 if gp.op == 'RENAME':
1237 backend.unlink(copysource)
1238 if not first_hunk:
1239 if gp.op == 'DELETE':
1240 backend.unlink(path)
1241 continue
1242 data, mode = None, None
1243 if gp.op in ('RENAME', 'COPY'):
1244 data, mode = store.getfile(copysource)
1245 if gp.mode:
1246 mode = gp.mode
1247 if gp.op == 'ADD':
1248 # Added files without content have no hunk and
1249 # must be created
1250 data = ''
1251 if data or mode:
1252 if (gp.op in ('ADD', 'RENAME', 'COPY')
1253 and backend.exists(path)):
1254 raise PatchError(_("cannot create %s: destination "
1255 "already exists") % path)
1256 backend.setfile(path, data, mode, copysource)
1240 gp.oldpath = pstrip(gp.oldpath)
1241 else:
1242 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1243 if gp.op == 'RENAME':
1244 backend.unlink(gp.oldpath)
1257 1245 if not first_hunk:
1246 if gp.op == 'DELETE':
1247 backend.unlink(gp.path)
1248 continue
1249 data, mode = None, None
1250 if gp.op in ('RENAME', 'COPY'):
1251 data, mode = store.getfile(gp.oldpath)
1252 if gp.mode:
1253 mode = gp.mode
1254 if gp.op == 'ADD':
1255 # Added files without content have no hunk and
1256 # must be created
1257 data = ''
1258 if data or mode:
1259 if (gp.op in ('ADD', 'RENAME', 'COPY')
1260 and backend.exists(gp.path)):
1261 raise PatchError(_("cannot create %s: destination "
1262 "already exists") % gp.path)
1263 backend.setfile(gp.path, data, mode, gp.oldpath)
1258 1264 continue
1259 1265 try:
1260 mode = gp and gp.mode or None
1261 current_file, create, remove = selectfile(
1262 backend, afile, bfile, first_hunk, strip, gp)
1263 current_file = patcher(ui, current_file, backend, store, mode,
1264 create, remove, eolmode=eolmode,
1265 copysource=copysource)
1266 current_file = patcher(ui, gp, backend, store,
1267 eolmode=eolmode)
1266 1268 except PatchError, inst:
1267 1269 ui.warn(str(inst) + '\n')
1268 1270 current_file = None
1269 1271 rejects += 1
1270 1272 continue
1271 1273 elif state == 'git':
1272 1274 for gp in values:
1273 1275 path = pstrip(gp.oldpath)
1274 1276 data, mode = backend.getfile(path)
1275 1277 store.setfile(path, data, mode)
1276 1278 else:
1277 1279 raise util.Abort(_('unsupported parser state: %s') % state)
1278 1280
1279 1281 if current_file:
1280 1282 rejects += current_file.close()
1281 1283
1282 1284 if rejects:
1283 1285 return -1
1284 1286 return err
1285 1287
1286 1288 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1287 1289 similarity):
1288 1290 """use <patcher> to apply <patchname> to the working directory.
1289 1291 returns whether patch was applied with fuzz factor."""
1290 1292
1291 1293 fuzz = False
1292 1294 args = []
1293 1295 cwd = repo.root
1294 1296 if cwd:
1295 1297 args.append('-d %s' % util.shellquote(cwd))
1296 1298 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1297 1299 util.shellquote(patchname)))
1298 1300 try:
1299 1301 for line in fp:
1300 1302 line = line.rstrip()
1301 1303 ui.note(line + '\n')
1302 1304 if line.startswith('patching file '):
1303 1305 pf = util.parsepatchoutput(line)
1304 1306 printed_file = False
1305 1307 files.add(pf)
1306 1308 elif line.find('with fuzz') >= 0:
1307 1309 fuzz = True
1308 1310 if not printed_file:
1309 1311 ui.warn(pf + '\n')
1310 1312 printed_file = True
1311 1313 ui.warn(line + '\n')
1312 1314 elif line.find('saving rejects to file') >= 0:
1313 1315 ui.warn(line + '\n')
1314 1316 elif line.find('FAILED') >= 0:
1315 1317 if not printed_file:
1316 1318 ui.warn(pf + '\n')
1317 1319 printed_file = True
1318 1320 ui.warn(line + '\n')
1319 1321 finally:
1320 1322 if files:
1321 1323 cfiles = list(files)
1322 1324 cwd = repo.getcwd()
1323 1325 if cwd:
1324 1326 cfiles = [util.pathto(repo.root, cwd, f)
1325 1327 for f in cfile]
1326 1328 scmutil.addremove(repo, cfiles, similarity=similarity)
1327 1329 code = fp.close()
1328 1330 if code:
1329 1331 raise PatchError(_("patch command failed: %s") %
1330 1332 util.explainexit(code)[0])
1331 1333 return fuzz
1332 1334
1333 1335 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1334 1336 similarity=0):
1335 1337 """use builtin patch to apply <patchobj> to the working directory.
1336 1338 returns whether patch was applied with fuzz factor."""
1337 1339
1338 1340 if files is None:
1339 1341 files = set()
1340 1342 if eolmode is None:
1341 1343 eolmode = ui.config('patch', 'eol', 'strict')
1342 1344 if eolmode.lower() not in eolmodes:
1343 1345 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1344 1346 eolmode = eolmode.lower()
1345 1347
1346 1348 store = filestore()
1347 1349 backend = workingbackend(ui, repo, similarity)
1348 1350 try:
1349 1351 fp = open(patchobj, 'rb')
1350 1352 except TypeError:
1351 1353 fp = patchobj
1352 1354 try:
1353 1355 ret = applydiff(ui, fp, backend, store, strip=strip,
1354 1356 eolmode=eolmode)
1355 1357 finally:
1356 1358 if fp != patchobj:
1357 1359 fp.close()
1358 1360 files.update(backend.close())
1359 1361 store.close()
1360 1362 if ret < 0:
1361 1363 raise PatchError(_('patch failed to apply'))
1362 1364 return ret > 0
1363 1365
1364 1366 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1365 1367 similarity=0):
1366 1368 """Apply <patchname> to the working directory.
1367 1369
1368 1370 'eolmode' specifies how end of lines should be handled. It can be:
1369 1371 - 'strict': inputs are read in binary mode, EOLs are preserved
1370 1372 - 'crlf': EOLs are ignored when patching and reset to CRLF
1371 1373 - 'lf': EOLs are ignored when patching and reset to LF
1372 1374 - None: get it from user settings, default to 'strict'
1373 1375 'eolmode' is ignored when using an external patcher program.
1374 1376
1375 1377 Returns whether patch was applied with fuzz factor.
1376 1378 """
1377 1379 patcher = ui.config('ui', 'patch')
1378 1380 if files is None:
1379 1381 files = set()
1380 1382 try:
1381 1383 if patcher:
1382 1384 return _externalpatch(ui, repo, patcher, patchname, strip,
1383 1385 files, similarity)
1384 1386 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1385 1387 similarity)
1386 1388 except PatchError, err:
1387 1389 raise util.Abort(str(err))
1388 1390
1389 1391 def changedfiles(ui, repo, patchpath, strip=1):
1390 1392 backend = fsbackend(ui, repo.root)
1391 1393 fp = open(patchpath, 'rb')
1392 1394 try:
1393 1395 changed = set()
1394 1396 for state, values in iterhunks(fp):
1395 1397 if state == 'file':
1396 1398 afile, bfile, first_hunk, gp = values
1397 1399 if gp:
1398 changed.add(pathstrip(gp.path, strip - 1)[1])
1399 if gp.op == 'RENAME':
1400 changed.add(pathstrip(gp.oldpath, strip - 1)[1])
1401 if not first_hunk:
1402 continue
1403 current_file, create, remove = selectfile(
1404 backend, afile, bfile, first_hunk, strip, gp)
1405 changed.add(current_file)
1400 gp.path = pathstrip(gp.path, strip - 1)[1]
1401 if gp.oldpath:
1402 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1403 else:
1404 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1405 changed.add(gp.path)
1406 if gp.op == 'RENAME':
1407 changed.add(gp.oldpath)
1406 1408 elif state not in ('hunk', 'git'):
1407 1409 raise util.Abort(_('unsupported parser state: %s') % state)
1408 1410 return changed
1409 1411 finally:
1410 1412 fp.close()
1411 1413
1412 1414 def b85diff(to, tn):
1413 1415 '''print base85-encoded binary diff'''
1414 1416 def gitindex(text):
1415 1417 if not text:
1416 1418 return hex(nullid)
1417 1419 l = len(text)
1418 1420 s = util.sha1('blob %d\0' % l)
1419 1421 s.update(text)
1420 1422 return s.hexdigest()
1421 1423
1422 1424 def fmtline(line):
1423 1425 l = len(line)
1424 1426 if l <= 26:
1425 1427 l = chr(ord('A') + l - 1)
1426 1428 else:
1427 1429 l = chr(l - 26 + ord('a') - 1)
1428 1430 return '%c%s\n' % (l, base85.b85encode(line, True))
1429 1431
1430 1432 def chunk(text, csize=52):
1431 1433 l = len(text)
1432 1434 i = 0
1433 1435 while i < l:
1434 1436 yield text[i:i + csize]
1435 1437 i += csize
1436 1438
1437 1439 tohash = gitindex(to)
1438 1440 tnhash = gitindex(tn)
1439 1441 if tohash == tnhash:
1440 1442 return ""
1441 1443
1442 1444 # TODO: deltas
1443 1445 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1444 1446 (tohash, tnhash, len(tn))]
1445 1447 for l in chunk(zlib.compress(tn)):
1446 1448 ret.append(fmtline(l))
1447 1449 ret.append('\n')
1448 1450 return ''.join(ret)
1449 1451
1450 1452 class GitDiffRequired(Exception):
1451 1453 pass
1452 1454
1453 1455 def diffopts(ui, opts=None, untrusted=False):
1454 1456 def get(key, name=None, getter=ui.configbool):
1455 1457 return ((opts and opts.get(key)) or
1456 1458 getter('diff', name or key, None, untrusted=untrusted))
1457 1459 return mdiff.diffopts(
1458 1460 text=opts and opts.get('text'),
1459 1461 git=get('git'),
1460 1462 nodates=get('nodates'),
1461 1463 showfunc=get('show_function', 'showfunc'),
1462 1464 ignorews=get('ignore_all_space', 'ignorews'),
1463 1465 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1464 1466 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1465 1467 context=get('unified', getter=ui.config))
1466 1468
1467 1469 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1468 1470 losedatafn=None, prefix=''):
1469 1471 '''yields diff of changes to files between two nodes, or node and
1470 1472 working directory.
1471 1473
1472 1474 if node1 is None, use first dirstate parent instead.
1473 1475 if node2 is None, compare node1 with working directory.
1474 1476
1475 1477 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1476 1478 every time some change cannot be represented with the current
1477 1479 patch format. Return False to upgrade to git patch format, True to
1478 1480 accept the loss or raise an exception to abort the diff. It is
1479 1481 called with the name of current file being diffed as 'fn'. If set
1480 1482 to None, patches will always be upgraded to git format when
1481 1483 necessary.
1482 1484
1483 1485 prefix is a filename prefix that is prepended to all filenames on
1484 1486 display (used for subrepos).
1485 1487 '''
1486 1488
1487 1489 if opts is None:
1488 1490 opts = mdiff.defaultopts
1489 1491
1490 1492 if not node1 and not node2:
1491 1493 node1 = repo.dirstate.p1()
1492 1494
1493 1495 def lrugetfilectx():
1494 1496 cache = {}
1495 1497 order = []
1496 1498 def getfilectx(f, ctx):
1497 1499 fctx = ctx.filectx(f, filelog=cache.get(f))
1498 1500 if f not in cache:
1499 1501 if len(cache) > 20:
1500 1502 del cache[order.pop(0)]
1501 1503 cache[f] = fctx.filelog()
1502 1504 else:
1503 1505 order.remove(f)
1504 1506 order.append(f)
1505 1507 return fctx
1506 1508 return getfilectx
1507 1509 getfilectx = lrugetfilectx()
1508 1510
1509 1511 ctx1 = repo[node1]
1510 1512 ctx2 = repo[node2]
1511 1513
1512 1514 if not changes:
1513 1515 changes = repo.status(ctx1, ctx2, match=match)
1514 1516 modified, added, removed = changes[:3]
1515 1517
1516 1518 if not modified and not added and not removed:
1517 1519 return []
1518 1520
1519 1521 revs = None
1520 1522 if not repo.ui.quiet:
1521 1523 hexfunc = repo.ui.debugflag and hex or short
1522 1524 revs = [hexfunc(node) for node in [node1, node2] if node]
1523 1525
1524 1526 copy = {}
1525 1527 if opts.git or opts.upgrade:
1526 1528 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1527 1529
1528 1530 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1529 1531 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1530 1532 if opts.upgrade and not opts.git:
1531 1533 try:
1532 1534 def losedata(fn):
1533 1535 if not losedatafn or not losedatafn(fn=fn):
1534 1536 raise GitDiffRequired()
1535 1537 # Buffer the whole output until we are sure it can be generated
1536 1538 return list(difffn(opts.copy(git=False), losedata))
1537 1539 except GitDiffRequired:
1538 1540 return difffn(opts.copy(git=True), None)
1539 1541 else:
1540 1542 return difffn(opts, None)
1541 1543
1542 1544 def difflabel(func, *args, **kw):
1543 1545 '''yields 2-tuples of (output, label) based on the output of func()'''
1544 1546 prefixes = [('diff', 'diff.diffline'),
1545 1547 ('copy', 'diff.extended'),
1546 1548 ('rename', 'diff.extended'),
1547 1549 ('old', 'diff.extended'),
1548 1550 ('new', 'diff.extended'),
1549 1551 ('deleted', 'diff.extended'),
1550 1552 ('---', 'diff.file_a'),
1551 1553 ('+++', 'diff.file_b'),
1552 1554 ('@@', 'diff.hunk'),
1553 1555 ('-', 'diff.deleted'),
1554 1556 ('+', 'diff.inserted')]
1555 1557
1556 1558 for chunk in func(*args, **kw):
1557 1559 lines = chunk.split('\n')
1558 1560 for i, line in enumerate(lines):
1559 1561 if i != 0:
1560 1562 yield ('\n', '')
1561 1563 stripline = line
1562 1564 if line and line[0] in '+-':
1563 1565 # highlight trailing whitespace, but only in changed lines
1564 1566 stripline = line.rstrip()
1565 1567 for prefix, label in prefixes:
1566 1568 if stripline.startswith(prefix):
1567 1569 yield (stripline, label)
1568 1570 break
1569 1571 else:
1570 1572 yield (line, '')
1571 1573 if line != stripline:
1572 1574 yield (line[len(stripline):], 'diff.trailingwhitespace')
1573 1575
1574 1576 def diffui(*args, **kw):
1575 1577 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1576 1578 return difflabel(diff, *args, **kw)
1577 1579
1578 1580
1579 1581 def _addmodehdr(header, omode, nmode):
1580 1582 if omode != nmode:
1581 1583 header.append('old mode %s\n' % omode)
1582 1584 header.append('new mode %s\n' % nmode)
1583 1585
1584 1586 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1585 1587 copy, getfilectx, opts, losedatafn, prefix):
1586 1588
1587 1589 def join(f):
1588 1590 return os.path.join(prefix, f)
1589 1591
1590 1592 date1 = util.datestr(ctx1.date())
1591 1593 man1 = ctx1.manifest()
1592 1594
1593 1595 gone = set()
1594 1596 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1595 1597
1596 1598 copyto = dict([(v, k) for k, v in copy.items()])
1597 1599
1598 1600 if opts.git:
1599 1601 revs = None
1600 1602
1601 1603 for f in sorted(modified + added + removed):
1602 1604 to = None
1603 1605 tn = None
1604 1606 dodiff = True
1605 1607 header = []
1606 1608 if f in man1:
1607 1609 to = getfilectx(f, ctx1).data()
1608 1610 if f not in removed:
1609 1611 tn = getfilectx(f, ctx2).data()
1610 1612 a, b = f, f
1611 1613 if opts.git or losedatafn:
1612 1614 if f in added:
1613 1615 mode = gitmode[ctx2.flags(f)]
1614 1616 if f in copy or f in copyto:
1615 1617 if opts.git:
1616 1618 if f in copy:
1617 1619 a = copy[f]
1618 1620 else:
1619 1621 a = copyto[f]
1620 1622 omode = gitmode[man1.flags(a)]
1621 1623 _addmodehdr(header, omode, mode)
1622 1624 if a in removed and a not in gone:
1623 1625 op = 'rename'
1624 1626 gone.add(a)
1625 1627 else:
1626 1628 op = 'copy'
1627 1629 header.append('%s from %s\n' % (op, join(a)))
1628 1630 header.append('%s to %s\n' % (op, join(f)))
1629 1631 to = getfilectx(a, ctx1).data()
1630 1632 else:
1631 1633 losedatafn(f)
1632 1634 else:
1633 1635 if opts.git:
1634 1636 header.append('new file mode %s\n' % mode)
1635 1637 elif ctx2.flags(f):
1636 1638 losedatafn(f)
1637 1639 # In theory, if tn was copied or renamed we should check
1638 1640 # if the source is binary too but the copy record already
1639 1641 # forces git mode.
1640 1642 if util.binary(tn):
1641 1643 if opts.git:
1642 1644 dodiff = 'binary'
1643 1645 else:
1644 1646 losedatafn(f)
1645 1647 if not opts.git and not tn:
1646 1648 # regular diffs cannot represent new empty file
1647 1649 losedatafn(f)
1648 1650 elif f in removed:
1649 1651 if opts.git:
1650 1652 # have we already reported a copy above?
1651 1653 if ((f in copy and copy[f] in added
1652 1654 and copyto[copy[f]] == f) or
1653 1655 (f in copyto and copyto[f] in added
1654 1656 and copy[copyto[f]] == f)):
1655 1657 dodiff = False
1656 1658 else:
1657 1659 header.append('deleted file mode %s\n' %
1658 1660 gitmode[man1.flags(f)])
1659 1661 elif not to or util.binary(to):
1660 1662 # regular diffs cannot represent empty file deletion
1661 1663 losedatafn(f)
1662 1664 else:
1663 1665 oflag = man1.flags(f)
1664 1666 nflag = ctx2.flags(f)
1665 1667 binary = util.binary(to) or util.binary(tn)
1666 1668 if opts.git:
1667 1669 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1668 1670 if binary:
1669 1671 dodiff = 'binary'
1670 1672 elif binary or nflag != oflag:
1671 1673 losedatafn(f)
1672 1674 if opts.git:
1673 1675 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1674 1676
1675 1677 if dodiff:
1676 1678 if dodiff == 'binary':
1677 1679 text = b85diff(to, tn)
1678 1680 else:
1679 1681 text = mdiff.unidiff(to, date1,
1680 1682 # ctx2 date may be dynamic
1681 1683 tn, util.datestr(ctx2.date()),
1682 1684 join(a), join(b), revs, opts=opts)
1683 1685 if header and (text or len(header) > 1):
1684 1686 yield ''.join(header)
1685 1687 if text:
1686 1688 yield text
1687 1689
1688 1690 def diffstatsum(stats):
1689 1691 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1690 1692 for f, a, r, b in stats:
1691 1693 maxfile = max(maxfile, encoding.colwidth(f))
1692 1694 maxtotal = max(maxtotal, a + r)
1693 1695 addtotal += a
1694 1696 removetotal += r
1695 1697 binary = binary or b
1696 1698
1697 1699 return maxfile, maxtotal, addtotal, removetotal, binary
1698 1700
1699 1701 def diffstatdata(lines):
1700 1702 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1701 1703
1702 1704 results = []
1703 1705 filename, adds, removes = None, 0, 0
1704 1706
1705 1707 def addresult():
1706 1708 if filename:
1707 1709 isbinary = adds == 0 and removes == 0
1708 1710 results.append((filename, adds, removes, isbinary))
1709 1711
1710 1712 for line in lines:
1711 1713 if line.startswith('diff'):
1712 1714 addresult()
1713 1715 # set numbers to 0 anyway when starting new file
1714 1716 adds, removes = 0, 0
1715 1717 if line.startswith('diff --git'):
1716 1718 filename = gitre.search(line).group(1)
1717 1719 elif line.startswith('diff -r'):
1718 1720 # format: "diff -r ... -r ... filename"
1719 1721 filename = diffre.search(line).group(1)
1720 1722 elif line.startswith('+') and not line.startswith('+++'):
1721 1723 adds += 1
1722 1724 elif line.startswith('-') and not line.startswith('---'):
1723 1725 removes += 1
1724 1726 addresult()
1725 1727 return results
1726 1728
1727 1729 def diffstat(lines, width=80, git=False):
1728 1730 output = []
1729 1731 stats = diffstatdata(lines)
1730 1732 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1731 1733
1732 1734 countwidth = len(str(maxtotal))
1733 1735 if hasbinary and countwidth < 3:
1734 1736 countwidth = 3
1735 1737 graphwidth = width - countwidth - maxname - 6
1736 1738 if graphwidth < 10:
1737 1739 graphwidth = 10
1738 1740
1739 1741 def scale(i):
1740 1742 if maxtotal <= graphwidth:
1741 1743 return i
1742 1744 # If diffstat runs out of room it doesn't print anything,
1743 1745 # which isn't very useful, so always print at least one + or -
1744 1746 # if there were at least some changes.
1745 1747 return max(i * graphwidth // maxtotal, int(bool(i)))
1746 1748
1747 1749 for filename, adds, removes, isbinary in stats:
1748 1750 if git and isbinary:
1749 1751 count = 'Bin'
1750 1752 else:
1751 1753 count = adds + removes
1752 1754 pluses = '+' * scale(adds)
1753 1755 minuses = '-' * scale(removes)
1754 1756 output.append(' %s%s | %*s %s%s\n' %
1755 1757 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1756 1758 countwidth, count, pluses, minuses))
1757 1759
1758 1760 if stats:
1759 1761 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1760 1762 % (len(stats), totaladds, totalremoves))
1761 1763
1762 1764 return ''.join(output)
1763 1765
1764 1766 def diffstatui(*args, **kw):
1765 1767 '''like diffstat(), but yields 2-tuples of (output, label) for
1766 1768 ui.write()
1767 1769 '''
1768 1770
1769 1771 for line in diffstat(*args, **kw).splitlines():
1770 1772 if line and line[-1] in '+-':
1771 1773 name, graph = line.rsplit(' ', 1)
1772 1774 yield (name + ' ', '')
1773 1775 m = re.search(r'\++', graph)
1774 1776 if m:
1775 1777 yield (m.group(0), 'diffstat.inserted')
1776 1778 m = re.search(r'-+', graph)
1777 1779 if m:
1778 1780 yield (m.group(0), 'diffstat.deleted')
1779 1781 else:
1780 1782 yield (line, '')
1781 1783 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now