record: move header class from record to patch...
Laurent Charignon
r24261:20aac24e default
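This changeset moves the patch-header abstraction out of the record extension and into mercurial/patch.py; record's parser now constructs patch.header(hdr) instead of a module-local class (see newfile() in the record.py hunk below). A minimal sketch of the relocated class, based only on the methods visible in this diff; the file name and header lines are illustrative and assume a working Mercurial installation:

    from mercurial import patch

    # Header lines of a git-style diff for one file (illustrative sample).
    hdr_lines = [
        'diff --git a/foo.py b/foo.py\n',
        '--- a/foo.py\n',
        '+++ b/foo.py\n',
    ]

    h = patch.header(hdr_lines)   # previously record's own header class
    h.files()      # ['foo.py'] -- fromfile == tofile, so a single entry
    h.filename()   # 'foo.py'
    h.special()    # False -- no index/new/deleted/copy/rename line
    h.binary()     # False -- no 'index ' line in the header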
record.py
@@ -1,672 +1,612 @@
1 1 # record.py
2 2 #
3 3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''commands to interactively select changes for commit/qrefresh'''
9 9
10 10 from mercurial.i18n import _
11 11 from mercurial import cmdutil, commands, extensions, hg, patch
12 12 from mercurial import util
13 13 import copy, cStringIO, errno, os, re, shutil, tempfile
14 14
15 15 cmdtable = {}
16 16 command = cmdutil.command(cmdtable)
17 17 testedwith = 'internal'
18 18
19 19 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
20 20
21 21 def scanpatch(fp):
22 22 """like patch.iterhunks, but yield different events
23 23
24 24 - ('file', [header_lines + fromfile + tofile])
25 25 - ('context', [context_lines])
26 26 - ('hunk', [hunk_lines])
27 27 - ('range', (-start,len, +start,len, proc))
28 28 """
29 29 lr = patch.linereader(fp)
30 30
31 31 def scanwhile(first, p):
32 32 """scan lr while predicate holds"""
33 33 lines = [first]
34 34 while True:
35 35 line = lr.readline()
36 36 if not line:
37 37 break
38 38 if p(line):
39 39 lines.append(line)
40 40 else:
41 41 lr.push(line)
42 42 break
43 43 return lines
44 44
45 45 while True:
46 46 line = lr.readline()
47 47 if not line:
48 48 break
49 49 if line.startswith('diff --git a/') or line.startswith('diff -r '):
50 50 def notheader(line):
51 51 s = line.split(None, 1)
52 52 return not s or s[0] not in ('---', 'diff')
53 53 header = scanwhile(line, notheader)
54 54 fromfile = lr.readline()
55 55 if fromfile.startswith('---'):
56 56 tofile = lr.readline()
57 57 header += [fromfile, tofile]
58 58 else:
59 59 lr.push(fromfile)
60 60 yield 'file', header
61 61 elif line[0] == ' ':
62 62 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
63 63 elif line[0] in '-+':
64 64 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
65 65 else:
66 66 m = lines_re.match(line)
67 67 if m:
68 68 yield 'range', m.groups()
69 69 else:
70 70 yield 'other', line
71 71
72 class header(object):
73 """patch header
74
75 XXX shouldn't we move this to mercurial/patch.py ?
76 """
77 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
78 diff_re = re.compile('diff -r .* (.*)$')
79 allhunks_re = re.compile('(?:index|deleted file) ')
80 pretty_re = re.compile('(?:new file|deleted file) ')
81 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
82
83 def __init__(self, header):
84 self.header = header
85 self.hunks = []
86
87 def binary(self):
88 return util.any(h.startswith('index ') for h in self.header)
89
90 def pretty(self, fp):
91 for h in self.header:
92 if h.startswith('index '):
93 fp.write(_('this modifies a binary file (all or nothing)\n'))
94 break
95 if self.pretty_re.match(h):
96 fp.write(h)
97 if self.binary():
98 fp.write(_('this is a binary file\n'))
99 break
100 if h.startswith('---'):
101 fp.write(_('%d hunks, %d lines changed\n') %
102 (len(self.hunks),
103 sum([max(h.added, h.removed) for h in self.hunks])))
104 break
105 fp.write(h)
106
107 def write(self, fp):
108 fp.write(''.join(self.header))
109
110 def allhunks(self):
111 return util.any(self.allhunks_re.match(h) for h in self.header)
112
113 def files(self):
114 match = self.diffgit_re.match(self.header[0])
115 if match:
116 fromfile, tofile = match.groups()
117 if fromfile == tofile:
118 return [fromfile]
119 return [fromfile, tofile]
120 else:
121 return self.diff_re.match(self.header[0]).groups()
122
123 def filename(self):
124 return self.files()[-1]
125
126 def __repr__(self):
127 return '<header %s>' % (' '.join(map(repr, self.files())))
128
129 def special(self):
130 return util.any(self.special_re.match(h) for h in self.header)
131
132 72 def countchanges(hunk):
133 73 """hunk -> (n+,n-)"""
134 74 add = len([h for h in hunk if h[0] == '+'])
135 75 rem = len([h for h in hunk if h[0] == '-'])
136 76 return add, rem
137 77
138 78 class hunk(object):
139 79 """patch hunk
140 80
141 81 XXX shouldn't we merge this with patch.hunk ?
142 82 """
143 83 maxcontext = 3
144 84
145 85 def __init__(self, header, fromline, toline, proc, before, hunk, after):
146 86 def trimcontext(number, lines):
147 87 delta = len(lines) - self.maxcontext
148 88 if False and delta > 0:
149 89 return number + delta, lines[:self.maxcontext]
150 90 return number, lines
151 91
152 92 self.header = header
153 93 self.fromline, self.before = trimcontext(fromline, before)
154 94 self.toline, self.after = trimcontext(toline, after)
155 95 self.proc = proc
156 96 self.hunk = hunk
157 97 self.added, self.removed = countchanges(self.hunk)
158 98
159 99 def write(self, fp):
160 100 delta = len(self.before) + len(self.after)
161 101 if self.after and self.after[-1] == '\\ No newline at end of file\n':
162 102 delta -= 1
163 103 fromlen = delta + self.removed
164 104 tolen = delta + self.added
165 105 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
166 106 (self.fromline, fromlen, self.toline, tolen,
167 107 self.proc and (' ' + self.proc)))
168 108 fp.write(''.join(self.before + self.hunk + self.after))
169 109
170 110 pretty = write
171 111
172 112 def filename(self):
173 113 return self.header.filename()
174 114
175 115 def __repr__(self):
176 116 return '<hunk %r@%d>' % (self.filename(), self.fromline)
177 117
178 118 def parsepatch(fp):
179 119 """patch -> [] of headers -> [] of hunks """
180 120 class parser(object):
181 121 """patch parsing state machine"""
182 122 def __init__(self):
183 123 self.fromline = 0
184 124 self.toline = 0
185 125 self.proc = ''
186 126 self.header = None
187 127 self.context = []
188 128 self.before = []
189 129 self.hunk = []
190 130 self.headers = []
191 131
192 132 def addrange(self, limits):
193 133 fromstart, fromend, tostart, toend, proc = limits
194 134 self.fromline = int(fromstart)
195 135 self.toline = int(tostart)
196 136 self.proc = proc
197 137
198 138 def addcontext(self, context):
199 139 if self.hunk:
200 140 h = hunk(self.header, self.fromline, self.toline, self.proc,
201 141 self.before, self.hunk, context)
202 142 self.header.hunks.append(h)
203 143 self.fromline += len(self.before) + h.removed
204 144 self.toline += len(self.before) + h.added
205 145 self.before = []
206 146 self.hunk = []
207 147 self.proc = ''
208 148 self.context = context
209 149
210 150 def addhunk(self, hunk):
211 151 if self.context:
212 152 self.before = self.context
213 153 self.context = []
214 154 self.hunk = hunk
215 155
216 156 def newfile(self, hdr):
217 157 self.addcontext([])
218 h = header(hdr)
158 h = patch.header(hdr)
219 159 self.headers.append(h)
220 160 self.header = h
221 161
222 162 def addother(self, line):
223 163 pass # 'other' lines are ignored
224 164
225 165 def finished(self):
226 166 self.addcontext([])
227 167 return self.headers
228 168
229 169 transitions = {
230 170 'file': {'context': addcontext,
231 171 'file': newfile,
232 172 'hunk': addhunk,
233 173 'range': addrange},
234 174 'context': {'file': newfile,
235 175 'hunk': addhunk,
236 176 'range': addrange,
237 177 'other': addother},
238 178 'hunk': {'context': addcontext,
239 179 'file': newfile,
240 180 'range': addrange},
241 181 'range': {'context': addcontext,
242 182 'hunk': addhunk},
243 183 'other': {'other': addother},
244 184 }
245 185
246 186 p = parser()
247 187
248 188 state = 'context'
249 189 for newstate, data in scanpatch(fp):
250 190 try:
251 191 p.transitions[state][newstate](p, data)
252 192 except KeyError:
253 193 raise patch.PatchError('unhandled transition: %s -> %s' %
254 194 (state, newstate))
255 195 state = newstate
256 196 return p.finished()
257 197
258 198 def filterpatch(ui, headers):
259 199 """Interactively filter patch chunks into applied-only chunks"""
260 200
261 201 def prompt(skipfile, skipall, query, chunk):
262 202 """prompt query, and process base inputs
263 203
264 204 - y/n for the rest of file
265 205 - y/n for the rest
266 206 - ? (help)
267 207 - q (quit)
268 208
269 209 Return True/False and possibly updated skipfile and skipall.
270 210 """
271 211 newpatches = None
272 212 if skipall is not None:
273 213 return skipall, skipfile, skipall, newpatches
274 214 if skipfile is not None:
275 215 return skipfile, skipfile, skipall, newpatches
276 216 while True:
277 217 resps = _('[Ynesfdaq?]'
278 218 '$$ &Yes, record this change'
279 219 '$$ &No, skip this change'
280 220 '$$ &Edit this change manually'
281 221 '$$ &Skip remaining changes to this file'
282 222 '$$ Record remaining changes to this &file'
283 223 '$$ &Done, skip remaining changes and files'
284 224 '$$ Record &all changes to all remaining files'
285 225 '$$ &Quit, recording no changes'
286 226 '$$ &? (display help)')
287 227 r = ui.promptchoice("%s %s" % (query, resps))
288 228 ui.write("\n")
289 229 if r == 8: # ?
290 230 for c, t in ui.extractchoices(resps)[1]:
291 231 ui.write('%s - %s\n' % (c, t.lower()))
292 232 continue
293 233 elif r == 0: # yes
294 234 ret = True
295 235 elif r == 1: # no
296 236 ret = False
297 237 elif r == 2: # Edit patch
298 238 if chunk is None:
299 239 ui.write(_('cannot edit patch for whole file'))
300 240 ui.write("\n")
301 241 continue
302 242 if chunk.header.binary():
303 243 ui.write(_('cannot edit patch for binary file'))
304 244 ui.write("\n")
305 245 continue
306 246 # Patch comment based on the Git one (based on comment at end of
307 247 # http://mercurial.selenic.com/wiki/RecordExtension)
308 248 phelp = '---' + _("""
309 249 To remove '-' lines, make them ' ' lines (context).
310 250 To remove '+' lines, delete them.
311 251 Lines starting with # will be removed from the patch.
312 252
313 253 If the patch applies cleanly, the edited hunk will immediately be
314 254 added to the record list. If it does not apply cleanly, a rejects
315 255 file will be generated: you can use that when you try again. If
316 256 all lines of the hunk are removed, then the edit is aborted and
317 257 the hunk is left unchanged.
318 258 """)
319 259 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
320 260 suffix=".diff", text=True)
321 261 ncpatchfp = None
322 262 try:
323 263 # Write the initial patch
324 264 f = os.fdopen(patchfd, "w")
325 265 chunk.header.write(f)
326 266 chunk.write(f)
327 267 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
328 268 f.close()
329 269 # Start the editor and wait for it to complete
330 270 editor = ui.geteditor()
331 271 ui.system("%s \"%s\"" % (editor, patchfn),
332 272 environ={'HGUSER': ui.username()},
333 273 onerr=util.Abort, errprefix=_("edit failed"))
334 274 # Remove comment lines
335 275 patchfp = open(patchfn)
336 276 ncpatchfp = cStringIO.StringIO()
337 277 for line in patchfp:
338 278 if not line.startswith('#'):
339 279 ncpatchfp.write(line)
340 280 patchfp.close()
341 281 ncpatchfp.seek(0)
342 282 newpatches = parsepatch(ncpatchfp)
343 283 finally:
344 284 os.unlink(patchfn)
345 285 del ncpatchfp
346 286 # Signal that the chunk shouldn't be applied as-is, but
347 287 # provide the new patch to be used instead.
348 288 ret = False
349 289 elif r == 3: # Skip
350 290 ret = skipfile = False
351 291 elif r == 4: # file (Record remaining)
352 292 ret = skipfile = True
353 293 elif r == 5: # done, skip remaining
354 294 ret = skipall = False
355 295 elif r == 6: # all
356 296 ret = skipall = True
357 297 elif r == 7: # quit
358 298 raise util.Abort(_('user quit'))
359 299 return ret, skipfile, skipall, newpatches
360 300
361 301 seen = set()
362 302 applied = {} # 'filename' -> [] of chunks
363 303 skipfile, skipall = None, None
364 304 pos, total = 1, sum(len(h.hunks) for h in headers)
365 305 for h in headers:
366 306 pos += len(h.hunks)
367 307 skipfile = None
368 308 fixoffset = 0
369 309 hdr = ''.join(h.header)
370 310 if hdr in seen:
371 311 continue
372 312 seen.add(hdr)
373 313 if skipall is None:
374 314 h.pretty(ui)
375 315 msg = (_('examine changes to %s?') %
376 316 _(' and ').join("'%s'" % f for f in h.files()))
377 317 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
378 318 if not r:
379 319 continue
380 320 applied[h.filename()] = [h]
381 321 if h.allhunks():
382 322 applied[h.filename()] += h.hunks
383 323 continue
384 324 for i, chunk in enumerate(h.hunks):
385 325 if skipfile is None and skipall is None:
386 326 chunk.pretty(ui)
387 327 if total == 1:
388 328 msg = _("record this change to '%s'?") % chunk.filename()
389 329 else:
390 330 idx = pos - len(h.hunks) + i
391 331 msg = _("record change %d/%d to '%s'?") % (idx, total,
392 332 chunk.filename())
393 333 r, skipfile, skipall, newpatches = prompt(skipfile,
394 334 skipall, msg, chunk)
395 335 if r:
396 336 if fixoffset:
397 337 chunk = copy.copy(chunk)
398 338 chunk.toline += fixoffset
399 339 applied[chunk.filename()].append(chunk)
400 340 elif newpatches is not None:
401 341 for newpatch in newpatches:
402 342 for newhunk in newpatch.hunks:
403 343 if fixoffset:
404 344 newhunk.toline += fixoffset
405 345 applied[newhunk.filename()].append(newhunk)
406 346 else:
407 347 fixoffset += chunk.removed - chunk.added
408 348 return sum([h for h in applied.itervalues()
409 349 if h[0].special() or len(h) > 1], [])
410 350
411 351 @command("record",
412 352 # same options as commit + white space diff options
413 353 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
414 354 _('hg record [OPTION]... [FILE]...'))
415 355 def record(ui, repo, *pats, **opts):
416 356 '''interactively select changes to commit
417 357
418 358 If a list of files is omitted, all changes reported by :hg:`status`
419 359 will be candidates for recording.
420 360
421 361 See :hg:`help dates` for a list of formats valid for -d/--date.
422 362
423 363 You will be prompted for whether to record changes to each
424 364 modified file, and for files with multiple changes, for each
425 365 change to use. For each query, the following responses are
426 366 possible::
427 367
428 368 y - record this change
429 369 n - skip this change
430 370 e - edit this change manually
431 371
432 372 s - skip remaining changes to this file
433 373 f - record remaining changes to this file
434 374
435 375 d - done, skip remaining changes and files
436 376 a - record all changes to all remaining files
437 377 q - quit, recording no changes
438 378
439 379 ? - display help
440 380
441 381 This command is not available when committing a merge.'''
442 382
443 383 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
444 384
445 385 def qrefresh(origfn, ui, repo, *pats, **opts):
446 386 if not opts['interactive']:
447 387 return origfn(ui, repo, *pats, **opts)
448 388
449 389 mq = extensions.find('mq')
450 390
451 391 def committomq(ui, repo, *pats, **opts):
452 392 # At this point the working copy contains only changes that
453 393 # were accepted. All other changes were reverted.
454 394 # We can't pass *pats here since qrefresh will undo all other
455 395 # changed files in the patch that aren't in pats.
456 396 mq.refresh(ui, repo, **opts)
457 397
458 398 # backup all changed files
459 399 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
460 400
461 401 # This command registration is replaced during uisetup().
462 402 @command('qrecord',
463 403 [],
464 404 _('hg qrecord [OPTION]... PATCH [FILE]...'),
465 405 inferrepo=True)
466 406 def qrecord(ui, repo, patch, *pats, **opts):
467 407 '''interactively record a new patch
468 408
469 409 See :hg:`help qnew` & :hg:`help record` for more information and
470 410 usage.
471 411 '''
472 412
473 413 try:
474 414 mq = extensions.find('mq')
475 415 except KeyError:
476 416 raise util.Abort(_("'mq' extension not loaded"))
477 417
478 418 repo.mq.checkpatchname(patch)
479 419
480 420 def committomq(ui, repo, *pats, **opts):
481 421 opts['checkname'] = False
482 422 mq.new(ui, repo, patch, *pats, **opts)
483 423
484 424 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
485 425
486 426 def qnew(origfn, ui, repo, patch, *args, **opts):
487 427 if opts['interactive']:
488 428 return qrecord(ui, repo, patch, *args, **opts)
489 429 return origfn(ui, repo, patch, *args, **opts)
490 430
491 431 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
492 432 if not ui.interactive():
493 433 raise util.Abort(_('running non-interactively, use %s instead') %
494 434 cmdsuggest)
495 435
496 436 # make sure username is set before going interactive
497 437 if not opts.get('user'):
498 438 ui.username() # raise exception, username not provided
499 439
500 440 def recordfunc(ui, repo, message, match, opts):
501 441 """This is generic record driver.
502 442
503 443 Its job is to interactively filter local changes, and
504 444 accordingly prepare working directory into a state in which the
505 445 job can be delegated to a non-interactive commit command such as
506 446 'commit' or 'qrefresh'.
507 447
508 448 After the actual job is done by non-interactive command, the
509 449 working directory is restored to its original state.
510 450
511 451 In the end we'll record interesting changes, and everything else
512 452 will be left in place, so the user can continue working.
513 453 """
514 454
515 455 cmdutil.checkunfinished(repo, commit=True)
516 456 merge = len(repo[None].parents()) > 1
517 457 if merge:
518 458 raise util.Abort(_('cannot partially commit a merge '
519 459 '(use "hg commit" instead)'))
520 460
521 461 status = repo.status(match=match)
522 462 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
523 463 diffopts.nodates = True
524 464 diffopts.git = True
525 465 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
526 466 fp = cStringIO.StringIO()
527 467 fp.write(''.join(originalchunks))
528 468 fp.seek(0)
529 469
530 470 # 1. filter patch, so we have an intending-to-apply subset of it
531 471 try:
532 472 chunks = filterpatch(ui, parsepatch(fp))
533 473 except patch.PatchError, err:
534 474 raise util.Abort(_('error parsing patch: %s') % err)
535 475
536 476 del fp
537 477
538 478 contenders = set()
539 479 for h in chunks:
540 480 try:
541 481 contenders.update(set(h.files()))
542 482 except AttributeError:
543 483 pass
544 484
545 485 changed = status.modified + status.added + status.removed
546 486 newfiles = [f for f in changed if f in contenders]
547 487 if not newfiles:
548 488 ui.status(_('no changes to record\n'))
549 489 return 0
550 490
551 491 newandmodifiedfiles = set()
552 492 for h in chunks:
553 493 ishunk = isinstance(h, hunk)
554 494 isnew = h.filename() in status.added
555 495 if ishunk and isnew and not h in originalchunks:
556 496 newandmodifiedfiles.add(h.filename())
557 497
558 498 modified = set(status.modified)
559 499
560 500 # 2. backup changed files, so we can restore them in the end
561 501
562 502 if backupall:
563 503 tobackup = changed
564 504 else:
565 505 tobackup = [f for f in newfiles
566 506 if f in modified or f in newandmodifiedfiles]
567 507
568 508 backups = {}
569 509 if tobackup:
570 510 backupdir = repo.join('record-backups')
571 511 try:
572 512 os.mkdir(backupdir)
573 513 except OSError, err:
574 514 if err.errno != errno.EEXIST:
575 515 raise
576 516 try:
577 517 # backup continues
578 518 for f in tobackup:
579 519 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
580 520 dir=backupdir)
581 521 os.close(fd)
582 522 ui.debug('backup %r as %r\n' % (f, tmpname))
583 523 util.copyfile(repo.wjoin(f), tmpname)
584 524 shutil.copystat(repo.wjoin(f), tmpname)
585 525 backups[f] = tmpname
586 526
587 527 fp = cStringIO.StringIO()
588 528 for c in chunks:
589 529 fname = c.filename()
590 530 if fname in backups or fname in newandmodifiedfiles:
591 531 c.write(fp)
592 532 dopatch = fp.tell()
593 533 fp.seek(0)
594 534
595 535 [os.unlink(c) for c in newandmodifiedfiles]
596 536
597 537 # 3a. apply filtered patch to clean repo (clean)
598 538 if backups:
599 539 hg.revert(repo, repo.dirstate.p1(),
600 540 lambda key: key in backups)
601 541
602 542 # 3b. (apply)
603 543 if dopatch:
604 544 try:
605 545 ui.debug('applying patch\n')
606 546 ui.debug(fp.getvalue())
607 547 patch.internalpatch(ui, repo, fp, 1, '', eolmode=None)
608 548 except patch.PatchError, err:
609 549 raise util.Abort(str(err))
610 550 del fp
611 551
612 552 # 4. We prepared working directory according to filtered
613 553 # patch. Now is the time to delegate the job to
614 554 # commit/qrefresh or the like!
615 555
616 556 # Make all of the pathnames absolute.
617 557 newfiles = [repo.wjoin(nf) for nf in newfiles]
618 558 commitfunc(ui, repo, *newfiles, **opts)
619 559
620 560 return 0
621 561 finally:
622 562 # 5. finally restore backed-up files
623 563 try:
624 564 for realname, tmpname in backups.iteritems():
625 565 ui.debug('restoring %r to %r\n' % (tmpname, realname))
626 566 util.copyfile(tmpname, repo.wjoin(realname))
627 567 # Our calls to copystat() here and above are a
628 568 # hack to trick any editors that have f open that
629 569 # we haven't modified them.
630 570 #
631 571 # Also note that this is racy as an editor could
632 572 # notice the file's mtime before we've finished
633 573 # writing it.
634 574 shutil.copystat(tmpname, repo.wjoin(realname))
635 575 os.unlink(tmpname)
636 576 if tobackup:
637 577 os.rmdir(backupdir)
638 578 except OSError:
639 579 pass
640 580
641 581 # wrap ui.write so diff output can be labeled/colorized
642 582 def wrapwrite(orig, *args, **kw):
643 583 label = kw.pop('label', '')
644 584 for chunk, l in patch.difflabel(lambda: args):
645 585 orig(chunk, label=label + l)
646 586 oldwrite = ui.write
647 587 extensions.wrapfunction(ui, 'write', wrapwrite)
648 588 try:
649 589 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
650 590 finally:
651 591 ui.write = oldwrite
652 592
653 593 def uisetup(ui):
654 594 try:
655 595 mq = extensions.find('mq')
656 596 except KeyError:
657 597 return
658 598
659 599 cmdtable["qrecord"] = \
660 600 (qrecord,
661 601 # same options as qnew, but copy them so we don't get
662 602 # -i/--interactive for qrecord and add white space diff options
663 603 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
664 604 _('hg qrecord [OPTION]... PATCH [FILE]...'))
665 605
666 606 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
667 607 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
668 608 _("interactively select changes to refresh"))
669 609
670 610 def _wrapcmd(cmd, table, wrapfn, msg):
671 611 entry = extensions.wrapcommand(table, cmd, wrapfn)
672 612 entry[1].append(('i', 'interactive', None, msg))
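The second half of the changeset adds the same class to mercurial/patch.py (only the "XXX shouldn't we move this to mercurial/patch.py?" note is dropped). As a result, record.parsepatch() now yields patch.header instances. A hedged sketch of that behaviour; the tiny diff text is illustrative and assumes hgext and mercurial are importable:

    import cStringIO
    from hgext import record
    from mercurial import patch

    difftext = ('diff --git a/foo.py b/foo.py\n'
                '--- a/foo.py\n'
                '+++ b/foo.py\n'
                '@@ -1,1 +1,2 @@\n'
                ' old line\n'
                '+new line\n')

    headers = record.parsepatch(cStringIO.StringIO(difftext))
    h = headers[0]
    isinstance(h, patch.header)   # True after this change
    h.filename()                  # 'foo.py'
    len(h.hunks)                  # 1
    h.hunks[0].added              # 1 line added
    h.hunks[0].removed            # 0 lines removed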
patch.py
@@ -1,1990 +1,2048 @@
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email, os, errno, re, posixpath
10 10 import tempfile, zlib, shutil
11 11 # On python2.4 you have to import these by name or they fail to
12 12 # load. This was not a problem on Python 2.7.
13 13 import email.Generator
14 14 import email.Parser
15 15
16 16 from i18n import _
17 17 from node import hex, short
18 18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 19
20 20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22 22
23 23 class PatchError(Exception):
24 24 pass
25 25
26 26
27 27 # public functions
28 28
29 29 def split(stream):
30 30 '''return an iterator of individual patches from a stream'''
31 31 def isheader(line, inheader):
32 32 if inheader and line[0] in (' ', '\t'):
33 33 # continuation
34 34 return True
35 35 if line[0] in (' ', '-', '+'):
36 36 # diff line - don't check for header pattern in there
37 37 return False
38 38 l = line.split(': ', 1)
39 39 return len(l) == 2 and ' ' not in l[0]
40 40
41 41 def chunk(lines):
42 42 return cStringIO.StringIO(''.join(lines))
43 43
44 44 def hgsplit(stream, cur):
45 45 inheader = True
46 46
47 47 for line in stream:
48 48 if not line.strip():
49 49 inheader = False
50 50 if not inheader and line.startswith('# HG changeset patch'):
51 51 yield chunk(cur)
52 52 cur = []
53 53 inheader = True
54 54
55 55 cur.append(line)
56 56
57 57 if cur:
58 58 yield chunk(cur)
59 59
60 60 def mboxsplit(stream, cur):
61 61 for line in stream:
62 62 if line.startswith('From '):
63 63 for c in split(chunk(cur[1:])):
64 64 yield c
65 65 cur = []
66 66
67 67 cur.append(line)
68 68
69 69 if cur:
70 70 for c in split(chunk(cur[1:])):
71 71 yield c
72 72
73 73 def mimesplit(stream, cur):
74 74 def msgfp(m):
75 75 fp = cStringIO.StringIO()
76 76 g = email.Generator.Generator(fp, mangle_from_=False)
77 77 g.flatten(m)
78 78 fp.seek(0)
79 79 return fp
80 80
81 81 for line in stream:
82 82 cur.append(line)
83 83 c = chunk(cur)
84 84
85 85 m = email.Parser.Parser().parse(c)
86 86 if not m.is_multipart():
87 87 yield msgfp(m)
88 88 else:
89 89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 90 for part in m.walk():
91 91 ct = part.get_content_type()
92 92 if ct not in ok_types:
93 93 continue
94 94 yield msgfp(part)
95 95
96 96 def headersplit(stream, cur):
97 97 inheader = False
98 98
99 99 for line in stream:
100 100 if not inheader and isheader(line, inheader):
101 101 yield chunk(cur)
102 102 cur = []
103 103 inheader = True
104 104 if inheader and not isheader(line, inheader):
105 105 inheader = False
106 106
107 107 cur.append(line)
108 108
109 109 if cur:
110 110 yield chunk(cur)
111 111
112 112 def remainder(cur):
113 113 yield chunk(cur)
114 114
115 115 class fiter(object):
116 116 def __init__(self, fp):
117 117 self.fp = fp
118 118
119 119 def __iter__(self):
120 120 return self
121 121
122 122 def next(self):
123 123 l = self.fp.readline()
124 124 if not l:
125 125 raise StopIteration
126 126 return l
127 127
128 128 inheader = False
129 129 cur = []
130 130
131 131 mimeheaders = ['content-type']
132 132
133 133 if not util.safehasattr(stream, 'next'):
134 134 # http responses, for example, have readline but not next
135 135 stream = fiter(stream)
136 136
137 137 for line in stream:
138 138 cur.append(line)
139 139 if line.startswith('# HG changeset patch'):
140 140 return hgsplit(stream, cur)
141 141 elif line.startswith('From '):
142 142 return mboxsplit(stream, cur)
143 143 elif isheader(line, inheader):
144 144 inheader = True
145 145 if line.split(':', 1)[0].lower() in mimeheaders:
146 146 # let email parser handle this
147 147 return mimesplit(stream, cur)
148 148 elif line.startswith('--- ') and inheader:
149 149 # No evil headers seen by diff start, split by hand
150 150 return headersplit(stream, cur)
151 151 # Not enough info, keep reading
152 152
153 153 # if we are here, we have a very plain patch
154 154 return remainder(cur)
155 155
156 156 def extract(ui, fileobj):
157 157 '''extract patch from data read from fileobj.
158 158
159 159 patch can be a normal patch or contained in an email message.
160 160
161 161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 162 Any item in the returned tuple can be None. If filename is None,
163 163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164 164
165 165 # attempt to detect the start of a patch
166 166 # (this heuristic is borrowed from quilt)
167 167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 169 r'---[ \t].*?^\+\+\+[ \t]|'
170 170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171 171
172 172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 173 tmpfp = os.fdopen(fd, 'w')
174 174 try:
175 175 msg = email.Parser.Parser().parse(fileobj)
176 176
177 177 subject = msg['Subject']
178 178 user = msg['From']
179 179 if not subject and not user:
180 180 # Not an email, restore parsed headers if any
181 181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182 182
183 183 # should try to parse msg['Date']
184 184 date = None
185 185 nodeid = None
186 186 branch = None
187 187 parents = []
188 188
189 189 if subject:
190 190 if subject.startswith('[PATCH'):
191 191 pend = subject.find(']')
192 192 if pend >= 0:
193 193 subject = subject[pend + 1:].lstrip()
194 194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 195 ui.debug('Subject: %s\n' % subject)
196 196 if user:
197 197 ui.debug('From: %s\n' % user)
198 198 diffs_seen = 0
199 199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 200 message = ''
201 201 for part in msg.walk():
202 202 content_type = part.get_content_type()
203 203 ui.debug('Content-Type: %s\n' % content_type)
204 204 if content_type not in ok_types:
205 205 continue
206 206 payload = part.get_payload(decode=True)
207 207 m = diffre.search(payload)
208 208 if m:
209 209 hgpatch = False
210 210 hgpatchheader = False
211 211 ignoretext = False
212 212
213 213 ui.debug('found patch at byte %d\n' % m.start(0))
214 214 diffs_seen += 1
215 215 cfp = cStringIO.StringIO()
216 216 for line in payload[:m.start(0)].splitlines():
217 217 if line.startswith('# HG changeset patch') and not hgpatch:
218 218 ui.debug('patch generated by hg export\n')
219 219 hgpatch = True
220 220 hgpatchheader = True
221 221 # drop earlier commit message content
222 222 cfp.seek(0)
223 223 cfp.truncate()
224 224 subject = None
225 225 elif hgpatchheader:
226 226 if line.startswith('# User '):
227 227 user = line[7:]
228 228 ui.debug('From: %s\n' % user)
229 229 elif line.startswith("# Date "):
230 230 date = line[7:]
231 231 elif line.startswith("# Branch "):
232 232 branch = line[9:]
233 233 elif line.startswith("# Node ID "):
234 234 nodeid = line[10:]
235 235 elif line.startswith("# Parent "):
236 236 parents.append(line[9:].lstrip())
237 237 elif not line.startswith("# "):
238 238 hgpatchheader = False
239 239 elif line == '---':
240 240 ignoretext = True
241 241 if not hgpatchheader and not ignoretext:
242 242 cfp.write(line)
243 243 cfp.write('\n')
244 244 message = cfp.getvalue()
245 245 if tmpfp:
246 246 tmpfp.write(payload)
247 247 if not payload.endswith('\n'):
248 248 tmpfp.write('\n')
249 249 elif not diffs_seen and message and content_type == 'text/plain':
250 250 message += '\n' + payload
251 251 except: # re-raises
252 252 tmpfp.close()
253 253 os.unlink(tmpname)
254 254 raise
255 255
256 256 if subject and not message.startswith(subject):
257 257 message = '%s\n%s' % (subject, message)
258 258 tmpfp.close()
259 259 if not diffs_seen:
260 260 os.unlink(tmpname)
261 261 return None, message, user, date, branch, None, None, None
262 262 p1 = parents and parents.pop(0) or None
263 263 p2 = parents and parents.pop(0) or None
264 264 return tmpname, message, user, date, branch, nodeid, p1, p2
265 265
266 266 class patchmeta(object):
267 267 """Patched file metadata
268 268
269 269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
270 270 or COPY. 'path' is patched file path. 'oldpath' is set to the
271 271 origin file when 'op' is either COPY or RENAME, None otherwise. If
272 272 file mode is changed, 'mode' is a tuple (islink, isexec) where
273 273 'islink' is True if the file is a symlink and 'isexec' is True if
274 274 the file is executable. Otherwise, 'mode' is None.
275 275 """
276 276 def __init__(self, path):
277 277 self.path = path
278 278 self.oldpath = None
279 279 self.mode = None
280 280 self.op = 'MODIFY'
281 281 self.binary = False
282 282
283 283 def setmode(self, mode):
284 284 islink = mode & 020000
285 285 isexec = mode & 0100
286 286 self.mode = (islink, isexec)
287 287
288 288 def copy(self):
289 289 other = patchmeta(self.path)
290 290 other.oldpath = self.oldpath
291 291 other.mode = self.mode
292 292 other.op = self.op
293 293 other.binary = self.binary
294 294 return other
295 295
296 296 def _ispatchinga(self, afile):
297 297 if afile == '/dev/null':
298 298 return self.op == 'ADD'
299 299 return afile == 'a/' + (self.oldpath or self.path)
300 300
301 301 def _ispatchingb(self, bfile):
302 302 if bfile == '/dev/null':
303 303 return self.op == 'DELETE'
304 304 return bfile == 'b/' + self.path
305 305
306 306 def ispatching(self, afile, bfile):
307 307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
308 308
309 309 def __repr__(self):
310 310 return "<patchmeta %s %r>" % (self.op, self.path)
311 311
312 312 def readgitpatch(lr):
313 313 """extract git-style metadata about patches from <patchname>"""
314 314
315 315 # Filter patch for git information
316 316 gp = None
317 317 gitpatches = []
318 318 for line in lr:
319 319 line = line.rstrip(' \r\n')
320 320 if line.startswith('diff --git a/'):
321 321 m = gitre.match(line)
322 322 if m:
323 323 if gp:
324 324 gitpatches.append(gp)
325 325 dst = m.group(2)
326 326 gp = patchmeta(dst)
327 327 elif gp:
328 328 if line.startswith('--- '):
329 329 gitpatches.append(gp)
330 330 gp = None
331 331 continue
332 332 if line.startswith('rename from '):
333 333 gp.op = 'RENAME'
334 334 gp.oldpath = line[12:]
335 335 elif line.startswith('rename to '):
336 336 gp.path = line[10:]
337 337 elif line.startswith('copy from '):
338 338 gp.op = 'COPY'
339 339 gp.oldpath = line[10:]
340 340 elif line.startswith('copy to '):
341 341 gp.path = line[8:]
342 342 elif line.startswith('deleted file'):
343 343 gp.op = 'DELETE'
344 344 elif line.startswith('new file mode '):
345 345 gp.op = 'ADD'
346 346 gp.setmode(int(line[-6:], 8))
347 347 elif line.startswith('new mode '):
348 348 gp.setmode(int(line[-6:], 8))
349 349 elif line.startswith('GIT binary patch'):
350 350 gp.binary = True
351 351 if gp:
352 352 gitpatches.append(gp)
353 353
354 354 return gitpatches
355 355
356 356 class linereader(object):
357 357 # simple class to allow pushing lines back into the input stream
358 358 def __init__(self, fp):
359 359 self.fp = fp
360 360 self.buf = []
361 361
362 362 def push(self, line):
363 363 if line is not None:
364 364 self.buf.append(line)
365 365
366 366 def readline(self):
367 367 if self.buf:
368 368 l = self.buf[0]
369 369 del self.buf[0]
370 370 return l
371 371 return self.fp.readline()
372 372
373 373 def __iter__(self):
374 374 while True:
375 375 l = self.readline()
376 376 if not l:
377 377 break
378 378 yield l
379 379
380 380 class abstractbackend(object):
381 381 def __init__(self, ui):
382 382 self.ui = ui
383 383
384 384 def getfile(self, fname):
385 385 """Return target file data and flags as a (data, (islink,
386 386 isexec)) tuple. Data is None if file is missing/deleted.
387 387 """
388 388 raise NotImplementedError
389 389
390 390 def setfile(self, fname, data, mode, copysource):
391 391 """Write data to target file fname and set its mode. mode is a
392 392 (islink, isexec) tuple. If data is None, the file content should
393 393 be left unchanged. If the file is modified after being copied,
394 394 copysource is set to the original file name.
395 395 """
396 396 raise NotImplementedError
397 397
398 398 def unlink(self, fname):
399 399 """Unlink target file."""
400 400 raise NotImplementedError
401 401
402 402 def writerej(self, fname, failed, total, lines):
403 403 """Write rejected lines for fname. total is the number of hunks
404 404 which failed to apply and total the total number of hunks for this
405 405 files.
406 406 """
407 407 pass
408 408
409 409 def exists(self, fname):
410 410 raise NotImplementedError
411 411
412 412 class fsbackend(abstractbackend):
413 413 def __init__(self, ui, basedir):
414 414 super(fsbackend, self).__init__(ui)
415 415 self.opener = scmutil.opener(basedir)
416 416
417 417 def _join(self, f):
418 418 return os.path.join(self.opener.base, f)
419 419
420 420 def getfile(self, fname):
421 421 if self.opener.islink(fname):
422 422 return (self.opener.readlink(fname), (True, False))
423 423
424 424 isexec = False
425 425 try:
426 426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 427 except OSError, e:
428 428 if e.errno != errno.ENOENT:
429 429 raise
430 430 try:
431 431 return (self.opener.read(fname), (False, isexec))
432 432 except IOError, e:
433 433 if e.errno != errno.ENOENT:
434 434 raise
435 435 return None, None
436 436
437 437 def setfile(self, fname, data, mode, copysource):
438 438 islink, isexec = mode
439 439 if data is None:
440 440 self.opener.setflags(fname, islink, isexec)
441 441 return
442 442 if islink:
443 443 self.opener.symlink(data, fname)
444 444 else:
445 445 self.opener.write(fname, data)
446 446 if isexec:
447 447 self.opener.setflags(fname, False, True)
448 448
449 449 def unlink(self, fname):
450 450 self.opener.unlinkpath(fname, ignoremissing=True)
451 451
452 452 def writerej(self, fname, failed, total, lines):
453 453 fname = fname + ".rej"
454 454 self.ui.warn(
455 455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 456 (failed, total, fname))
457 457 fp = self.opener(fname, 'w')
458 458 fp.writelines(lines)
459 459 fp.close()
460 460
461 461 def exists(self, fname):
462 462 return self.opener.lexists(fname)
463 463
464 464 class workingbackend(fsbackend):
465 465 def __init__(self, ui, repo, similarity):
466 466 super(workingbackend, self).__init__(ui, repo.root)
467 467 self.repo = repo
468 468 self.similarity = similarity
469 469 self.removed = set()
470 470 self.changed = set()
471 471 self.copied = []
472 472
473 473 def _checkknown(self, fname):
474 474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
475 475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
476 476
477 477 def setfile(self, fname, data, mode, copysource):
478 478 self._checkknown(fname)
479 479 super(workingbackend, self).setfile(fname, data, mode, copysource)
480 480 if copysource is not None:
481 481 self.copied.append((copysource, fname))
482 482 self.changed.add(fname)
483 483
484 484 def unlink(self, fname):
485 485 self._checkknown(fname)
486 486 super(workingbackend, self).unlink(fname)
487 487 self.removed.add(fname)
488 488 self.changed.add(fname)
489 489
490 490 def close(self):
491 491 wctx = self.repo[None]
492 492 changed = set(self.changed)
493 493 for src, dst in self.copied:
494 494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
495 495 if self.removed:
496 496 wctx.forget(sorted(self.removed))
497 497 for f in self.removed:
498 498 if f not in self.repo.dirstate:
499 499 # File was deleted and no longer belongs to the
500 500 # dirstate, it was probably marked added then
501 501 # deleted, and should not be considered by
502 502 # marktouched().
503 503 changed.discard(f)
504 504 if changed:
505 505 scmutil.marktouched(self.repo, changed, self.similarity)
506 506 return sorted(self.changed)
507 507
508 508 class filestore(object):
509 509 def __init__(self, maxsize=None):
510 510 self.opener = None
511 511 self.files = {}
512 512 self.created = 0
513 513 self.maxsize = maxsize
514 514 if self.maxsize is None:
515 515 self.maxsize = 4*(2**20)
516 516 self.size = 0
517 517 self.data = {}
518 518
519 519 def setfile(self, fname, data, mode, copied=None):
520 520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 521 self.data[fname] = (data, mode, copied)
522 522 self.size += len(data)
523 523 else:
524 524 if self.opener is None:
525 525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 526 self.opener = scmutil.opener(root)
527 527 # Avoid filename issues with these simple names
528 528 fn = str(self.created)
529 529 self.opener.write(fn, data)
530 530 self.created += 1
531 531 self.files[fname] = (fn, mode, copied)
532 532
533 533 def getfile(self, fname):
534 534 if fname in self.data:
535 535 return self.data[fname]
536 536 if not self.opener or fname not in self.files:
537 537 return None, None, None
538 538 fn, mode, copied = self.files[fname]
539 539 return self.opener.read(fn), mode, copied
540 540
541 541 def close(self):
542 542 if self.opener:
543 543 shutil.rmtree(self.opener.base)
544 544
545 545 class repobackend(abstractbackend):
546 546 def __init__(self, ui, repo, ctx, store):
547 547 super(repobackend, self).__init__(ui)
548 548 self.repo = repo
549 549 self.ctx = ctx
550 550 self.store = store
551 551 self.changed = set()
552 552 self.removed = set()
553 553 self.copied = {}
554 554
555 555 def _checkknown(self, fname):
556 556 if fname not in self.ctx:
557 557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558 558
559 559 def getfile(self, fname):
560 560 try:
561 561 fctx = self.ctx[fname]
562 562 except error.LookupError:
563 563 return None, None
564 564 flags = fctx.flags()
565 565 return fctx.data(), ('l' in flags, 'x' in flags)
566 566
567 567 def setfile(self, fname, data, mode, copysource):
568 568 if copysource:
569 569 self._checkknown(copysource)
570 570 if data is None:
571 571 data = self.ctx[fname].data()
572 572 self.store.setfile(fname, data, mode, copysource)
573 573 self.changed.add(fname)
574 574 if copysource:
575 575 self.copied[fname] = copysource
576 576
577 577 def unlink(self, fname):
578 578 self._checkknown(fname)
579 579 self.removed.add(fname)
580 580
581 581 def exists(self, fname):
582 582 return fname in self.ctx
583 583
584 584 def close(self):
585 585 return self.changed | self.removed
586 586
587 587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591 591
592 592 class patchfile(object):
593 593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 594 self.fname = gp.path
595 595 self.eolmode = eolmode
596 596 self.eol = None
597 597 self.backend = backend
598 598 self.ui = ui
599 599 self.lines = []
600 600 self.exists = False
601 601 self.missing = True
602 602 self.mode = gp.mode
603 603 self.copysource = gp.oldpath
604 604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 605 self.remove = gp.op == 'DELETE'
606 606 if self.copysource is None:
607 607 data, mode = backend.getfile(self.fname)
608 608 else:
609 609 data, mode = store.getfile(self.copysource)[:2]
610 610 if data is not None:
611 611 self.exists = self.copysource is None or backend.exists(self.fname)
612 612 self.missing = False
613 613 if data:
614 614 self.lines = mdiff.splitnewlines(data)
615 615 if self.mode is None:
616 616 self.mode = mode
617 617 if self.lines:
618 618 # Normalize line endings
619 619 if self.lines[0].endswith('\r\n'):
620 620 self.eol = '\r\n'
621 621 elif self.lines[0].endswith('\n'):
622 622 self.eol = '\n'
623 623 if eolmode != 'strict':
624 624 nlines = []
625 625 for l in self.lines:
626 626 if l.endswith('\r\n'):
627 627 l = l[:-2] + '\n'
628 628 nlines.append(l)
629 629 self.lines = nlines
630 630 else:
631 631 if self.create:
632 632 self.missing = False
633 633 if self.mode is None:
634 634 self.mode = (False, False)
635 635 if self.missing:
636 636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637 637
638 638 self.hash = {}
639 639 self.dirty = 0
640 640 self.offset = 0
641 641 self.skew = 0
642 642 self.rej = []
643 643 self.fileprinted = False
644 644 self.printfile(False)
645 645 self.hunks = 0
646 646
647 647 def writelines(self, fname, lines, mode):
648 648 if self.eolmode == 'auto':
649 649 eol = self.eol
650 650 elif self.eolmode == 'crlf':
651 651 eol = '\r\n'
652 652 else:
653 653 eol = '\n'
654 654
655 655 if self.eolmode != 'strict' and eol and eol != '\n':
656 656 rawlines = []
657 657 for l in lines:
658 658 if l and l[-1] == '\n':
659 659 l = l[:-1] + eol
660 660 rawlines.append(l)
661 661 lines = rawlines
662 662
663 663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664 664
665 665 def printfile(self, warn):
666 666 if self.fileprinted:
667 667 return
668 668 if warn or self.ui.verbose:
669 669 self.fileprinted = True
670 670 s = _("patching file %s\n") % self.fname
671 671 if warn:
672 672 self.ui.warn(s)
673 673 else:
674 674 self.ui.note(s)
675 675
676 676
677 677 def findlines(self, l, linenum):
678 678 # looks through the hash and finds candidate lines. The
679 679 # result is a list of line numbers sorted based on distance
680 680 # from linenum
681 681
682 682 cand = self.hash.get(l, [])
683 683 if len(cand) > 1:
684 684 # resort our list of potentials forward then back.
685 685 cand.sort(key=lambda x: abs(x - linenum))
686 686 return cand
687 687
688 688 def write_rej(self):
689 689 # our rejects are a little different from patch(1). This always
690 690 # creates rejects in the same form as the original patch. A file
691 691 # header is inserted so that you can run the reject through patch again
692 692 # without having to type the filename.
693 693 if not self.rej:
694 694 return
695 695 base = os.path.basename(self.fname)
696 696 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 697 for x in self.rej:
698 698 for l in x.hunk:
699 699 lines.append(l)
700 700 if l[-1] != '\n':
701 701 lines.append("\n\ No newline at end of file\n")
702 702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703 703
704 704 def apply(self, h):
705 705 if not h.complete():
706 706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 708 h.lenb))
709 709
710 710 self.hunks += 1
711 711
712 712 if self.missing:
713 713 self.rej.append(h)
714 714 return -1
715 715
716 716 if self.exists and self.create:
717 717 if self.copysource:
718 718 self.ui.warn(_("cannot create %s: destination already "
719 719 "exists\n") % self.fname)
720 720 else:
721 721 self.ui.warn(_("file %s already exists\n") % self.fname)
722 722 self.rej.append(h)
723 723 return -1
724 724
725 725 if isinstance(h, binhunk):
726 726 if self.remove:
727 727 self.backend.unlink(self.fname)
728 728 else:
729 729 l = h.new(self.lines)
730 730 self.lines[:] = l
731 731 self.offset += len(l)
732 732 self.dirty = True
733 733 return 0
734 734
735 735 horig = h
736 736 if (self.eolmode in ('crlf', 'lf')
737 737 or self.eolmode == 'auto' and self.eol):
738 738 # If new eols are going to be normalized, then normalize
739 739 # hunk data before patching. Otherwise, preserve input
740 740 # line-endings.
741 741 h = h.getnormalized()
742 742
743 743 # fast case first, no offsets, no fuzz
744 744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 745 oldstart += self.offset
746 746 orig_start = oldstart
747 747 # if there's skew we want to emit the "(offset %d lines)" even
748 748 # when the hunk cleanly applies at start + skew, so skip the
749 749 # fast case code
750 750 if (self.skew == 0 and
751 751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 752 if self.remove:
753 753 self.backend.unlink(self.fname)
754 754 else:
755 755 self.lines[oldstart:oldstart + len(old)] = new
756 756 self.offset += len(new) - len(old)
757 757 self.dirty = True
758 758 return 0
759 759
760 760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
761 761 self.hash = {}
762 762 for x, s in enumerate(self.lines):
763 763 self.hash.setdefault(s, []).append(x)
764 764
765 765 for fuzzlen in xrange(3):
766 766 for toponly in [True, False]:
767 767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 768 oldstart = oldstart + self.offset + self.skew
769 769 oldstart = min(oldstart, len(self.lines))
770 770 if old:
771 771 cand = self.findlines(old[0][1:], oldstart)
772 772 else:
773 773 # Only adding lines with no or fuzzed context, just
774 774 # take the skew into account
775 775 cand = [oldstart]
776 776
777 777 for l in cand:
778 778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 779 self.lines[l : l + len(old)] = new
780 780 self.offset += len(new) - len(old)
781 781 self.skew = l - orig_start
782 782 self.dirty = True
783 783 offset = l - orig_start - fuzzlen
784 784 if fuzzlen:
785 785 msg = _("Hunk #%d succeeded at %d "
786 786 "with fuzz %d "
787 787 "(offset %d lines).\n")
788 788 self.printfile(True)
789 789 self.ui.warn(msg %
790 790 (h.number, l + 1, fuzzlen, offset))
791 791 else:
792 792 msg = _("Hunk #%d succeeded at %d "
793 793 "(offset %d lines).\n")
794 794 self.ui.note(msg % (h.number, l + 1, offset))
795 795 return fuzzlen
796 796 self.printfile(True)
797 797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 798 self.rej.append(horig)
799 799 return -1
800 800
801 801 def close(self):
802 802 if self.dirty:
803 803 self.writelines(self.fname, self.lines, self.mode)
804 804 self.write_rej()
805 805 return len(self.rej)
806 806
807 class header(object):
808 """patch header
809 """
810 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
811 diff_re = re.compile('diff -r .* (.*)$')
812 allhunks_re = re.compile('(?:index|deleted file) ')
813 pretty_re = re.compile('(?:new file|deleted file) ')
814 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
815
816 def __init__(self, header):
817 self.header = header
818 self.hunks = []
819
820 def binary(self):
821 return util.any(h.startswith('index ') for h in self.header)
822
823 def pretty(self, fp):
824 for h in self.header:
825 if h.startswith('index '):
826 fp.write(_('this modifies a binary file (all or nothing)\n'))
827 break
828 if self.pretty_re.match(h):
829 fp.write(h)
830 if self.binary():
831 fp.write(_('this is a binary file\n'))
832 break
833 if h.startswith('---'):
834 fp.write(_('%d hunks, %d lines changed\n') %
835 (len(self.hunks),
836 sum([max(h.added, h.removed) for h in self.hunks])))
837 break
838 fp.write(h)
839
840 def write(self, fp):
841 fp.write(''.join(self.header))
842
843 def allhunks(self):
844 return util.any(self.allhunks_re.match(h) for h in self.header)
845
846 def files(self):
847 match = self.diffgit_re.match(self.header[0])
848 if match:
849 fromfile, tofile = match.groups()
850 if fromfile == tofile:
851 return [fromfile]
852 return [fromfile, tofile]
853 else:
854 return self.diff_re.match(self.header[0]).groups()
855
856 def filename(self):
857 return self.files()[-1]
858
859 def __repr__(self):
860 return '<header %s>' % (' '.join(map(repr, self.files())))
861
862 def special(self):
863 return util.any(self.special_re.match(h) for h in self.header)
864
807 865 class hunk(object):
808 866 def __init__(self, desc, num, lr, context):
809 867 self.number = num
810 868 self.desc = desc
811 869 self.hunk = [desc]
812 870 self.a = []
813 871 self.b = []
814 872 self.starta = self.lena = None
815 873 self.startb = self.lenb = None
816 874 if lr is not None:
817 875 if context:
818 876 self.read_context_hunk(lr)
819 877 else:
820 878 self.read_unified_hunk(lr)
821 879
822 880 def getnormalized(self):
823 881 """Return a copy with line endings normalized to LF."""
824 882
825 883 def normalize(lines):
826 884 nlines = []
827 885 for line in lines:
828 886 if line.endswith('\r\n'):
829 887 line = line[:-2] + '\n'
830 888 nlines.append(line)
831 889 return nlines
832 890
833 891 # Dummy object, it is rebuilt manually
834 892 nh = hunk(self.desc, self.number, None, None)
835 893 nh.number = self.number
836 894 nh.desc = self.desc
837 895 nh.hunk = self.hunk
838 896 nh.a = normalize(self.a)
839 897 nh.b = normalize(self.b)
840 898 nh.starta = self.starta
841 899 nh.startb = self.startb
842 900 nh.lena = self.lena
843 901 nh.lenb = self.lenb
844 902 return nh
845 903
846 904 def read_unified_hunk(self, lr):
847 905 m = unidesc.match(self.desc)
848 906 if not m:
849 907 raise PatchError(_("bad hunk #%d") % self.number)
850 908 self.starta, self.lena, self.startb, self.lenb = m.groups()
851 909 if self.lena is None:
852 910 self.lena = 1
853 911 else:
854 912 self.lena = int(self.lena)
855 913 if self.lenb is None:
856 914 self.lenb = 1
857 915 else:
858 916 self.lenb = int(self.lenb)
859 917 self.starta = int(self.starta)
860 918 self.startb = int(self.startb)
861 919 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
862 920 self.b)
863 921 # if we hit eof before finishing out the hunk, the last line will
864 922 # be zero length. Let's try to fix it up.
865 923 while len(self.hunk[-1]) == 0:
866 924 del self.hunk[-1]
867 925 del self.a[-1]
868 926 del self.b[-1]
869 927 self.lena -= 1
870 928 self.lenb -= 1
871 929 self._fixnewline(lr)
872 930
873 931 def read_context_hunk(self, lr):
874 932 self.desc = lr.readline()
875 933 m = contextdesc.match(self.desc)
876 934 if not m:
877 935 raise PatchError(_("bad hunk #%d") % self.number)
878 936 self.starta, aend = m.groups()
879 937 self.starta = int(self.starta)
880 938 if aend is None:
881 939 aend = self.starta
882 940 self.lena = int(aend) - self.starta
883 941 if self.starta:
884 942 self.lena += 1
885 943 for x in xrange(self.lena):
886 944 l = lr.readline()
887 945 if l.startswith('---'):
888 946 # lines addition, old block is empty
889 947 lr.push(l)
890 948 break
891 949 s = l[2:]
892 950 if l.startswith('- ') or l.startswith('! '):
893 951 u = '-' + s
894 952 elif l.startswith(' '):
895 953 u = ' ' + s
896 954 else:
897 955 raise PatchError(_("bad hunk #%d old text line %d") %
898 956 (self.number, x))
899 957 self.a.append(u)
900 958 self.hunk.append(u)
901 959
902 960 l = lr.readline()
903 961 if l.startswith('\ '):
904 962 s = self.a[-1][:-1]
905 963 self.a[-1] = s
906 964 self.hunk[-1] = s
907 965 l = lr.readline()
908 966 m = contextdesc.match(l)
909 967 if not m:
910 968 raise PatchError(_("bad hunk #%d") % self.number)
911 969 self.startb, bend = m.groups()
912 970 self.startb = int(self.startb)
913 971 if bend is None:
914 972 bend = self.startb
915 973 self.lenb = int(bend) - self.startb
916 974 if self.startb:
917 975 self.lenb += 1
918 976 hunki = 1
919 977 for x in xrange(self.lenb):
920 978 l = lr.readline()
921 979 if l.startswith('\ '):
922 980 # XXX: the only way to hit this is with an invalid line range.
923 981 # The no-eol marker is not counted in the line range, but I
924 982 # guess there are diff(1) out there which behave differently.
925 983 s = self.b[-1][:-1]
926 984 self.b[-1] = s
927 985 self.hunk[hunki - 1] = s
928 986 continue
929 987 if not l:
930 988 # line deletions, new block is empty and we hit EOF
931 989 lr.push(l)
932 990 break
933 991 s = l[2:]
934 992 if l.startswith('+ ') or l.startswith('! '):
935 993 u = '+' + s
936 994 elif l.startswith(' '):
937 995 u = ' ' + s
938 996 elif len(self.b) == 0:
939 997 # line deletions, new block is empty
940 998 lr.push(l)
941 999 break
942 1000 else:
943 1001 raise PatchError(_("bad hunk #%d old text line %d") %
944 1002 (self.number, x))
945 1003 self.b.append(s)
946 1004 while True:
947 1005 if hunki >= len(self.hunk):
948 1006 h = ""
949 1007 else:
950 1008 h = self.hunk[hunki]
951 1009 hunki += 1
952 1010 if h == u:
953 1011 break
954 1012 elif h.startswith('-'):
955 1013 continue
956 1014 else:
957 1015 self.hunk.insert(hunki - 1, u)
958 1016 break
959 1017
960 1018 if not self.a:
961 1019 # this happens when lines were only added to the hunk
962 1020 for x in self.hunk:
963 1021 if x.startswith('-') or x.startswith(' '):
964 1022 self.a.append(x)
965 1023 if not self.b:
966 1024 # this happens when lines were only deleted from the hunk
967 1025 for x in self.hunk:
968 1026 if x.startswith('+') or x.startswith(' '):
969 1027 self.b.append(x[1:])
970 1028 # @@ -start,len +start,len @@
971 1029 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
972 1030 self.startb, self.lenb)
973 1031 self.hunk[0] = self.desc
974 1032 self._fixnewline(lr)
975 1033
976 1034 def _fixnewline(self, lr):
977 1035 l = lr.readline()
978 1036 if l.startswith('\ '):
979 1037 diffhelpers.fix_newline(self.hunk, self.a, self.b)
980 1038 else:
981 1039 lr.push(l)
982 1040
983 1041 def complete(self):
984 1042 return len(self.a) == self.lena and len(self.b) == self.lenb
985 1043
986 1044 def _fuzzit(self, old, new, fuzz, toponly):
987 1045 # this removes context lines from the top and bottom of list 'l'. It
988 1046 # checks the hunk to make sure only context lines are removed, and then
989 1047 # returns a new shortened list of lines.
990 1048 fuzz = min(fuzz, len(old))
991 1049 if fuzz:
992 1050 top = 0
993 1051 bot = 0
994 1052 hlen = len(self.hunk)
995 1053 for x in xrange(hlen - 1):
996 1054 # the hunk starts with the @@ line, so use x+1
997 1055 if self.hunk[x + 1][0] == ' ':
998 1056 top += 1
999 1057 else:
1000 1058 break
1001 1059 if not toponly:
1002 1060 for x in xrange(hlen - 1):
1003 1061 if self.hunk[hlen - bot - 1][0] == ' ':
1004 1062 bot += 1
1005 1063 else:
1006 1064 break
1007 1065
1008 1066 bot = min(fuzz, bot)
1009 1067 top = min(fuzz, top)
1010 1068 return old[top:len(old) - bot], new[top:len(new) - bot], top
1011 1069 return old, new, 0
1012 1070
1013 1071 def fuzzit(self, fuzz, toponly):
1014 1072 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1015 1073 oldstart = self.starta + top
1016 1074 newstart = self.startb + top
1017 1075 # zero length hunk ranges already have their start decremented
1018 1076 if self.lena and oldstart > 0:
1019 1077 oldstart -= 1
1020 1078 if self.lenb and newstart > 0:
1021 1079 newstart -= 1
1022 1080 return old, oldstart, new, newstart
1023 1081
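# Illustrative note (an addition for exposition, not part of the original
# module): hunk.fuzzit(fuzz, toponly) trims up to 'fuzz' leading (and, unless
# toponly, trailing) context lines from the hunk's old and new line lists so
# the hunk can still apply when its surrounding context has drifted. For
# example, assuming a hunk with three leading and one trailing context line,
# fuzzit(2, False) would drop two lines from the top, one from the bottom,
# and advance the reported start lines by the amount trimmed from the top.
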
1024 1082 class binhunk(object):
1025 1083 'A binary patch file.'
1026 1084 def __init__(self, lr, fname):
1027 1085 self.text = None
1028 1086 self.delta = False
1029 1087 self.hunk = ['GIT binary patch\n']
1030 1088 self._fname = fname
1031 1089 self._read(lr)
1032 1090
1033 1091 def complete(self):
1034 1092 return self.text is not None
1035 1093
1036 1094 def new(self, lines):
1037 1095 if self.delta:
1038 1096 return [applybindelta(self.text, ''.join(lines))]
1039 1097 return [self.text]
1040 1098
1041 1099 def _read(self, lr):
1042 1100 def getline(lr, hunk):
1043 1101 l = lr.readline()
1044 1102 hunk.append(l)
1045 1103 return l.rstrip('\r\n')
1046 1104
1047 1105 size = 0
1048 1106 while True:
1049 1107 line = getline(lr, self.hunk)
1050 1108 if not line:
1051 1109 raise PatchError(_('could not extract "%s" binary data')
1052 1110 % self._fname)
1053 1111 if line.startswith('literal '):
1054 1112 size = int(line[8:].rstrip())
1055 1113 break
1056 1114 if line.startswith('delta '):
1057 1115 size = int(line[6:].rstrip())
1058 1116 self.delta = True
1059 1117 break
1060 1118 dec = []
1061 1119 line = getline(lr, self.hunk)
1062 1120 while len(line) > 1:
1063 1121 l = line[0]
1064 1122 if l <= 'Z' and l >= 'A':
1065 1123 l = ord(l) - ord('A') + 1
1066 1124 else:
1067 1125 l = ord(l) - ord('a') + 27
1068 1126 try:
1069 1127 dec.append(base85.b85decode(line[1:])[:l])
1070 1128 except ValueError, e:
1071 1129 raise PatchError(_('could not decode "%s" binary patch: %s')
1072 1130 % (self._fname, str(e)))
1073 1131 line = getline(lr, self.hunk)
1074 1132 text = zlib.decompress(''.join(dec))
1075 1133 if len(text) != size:
1076 1134 raise PatchError(_('"%s" length is %d bytes, should be %d')
1077 1135 % (self._fname, len(text), size))
1078 1136 self.text = text
1079 1137
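# Illustrative note (an addition for exposition, not from the original file):
# in a "GIT binary patch" section, _read above expects a 'literal <size>' or
# 'delta <size>' line followed by base85 data lines. Each data line starts
# with a length character ('A'-'Z' encode 1-26 decoded bytes, 'a'-'z' encode
# 27-52), the remainder is base85 text, and the decoded bytes are concatenated
# and zlib-decompressed; the result must match the declared size.
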
1080 1138 def parsefilename(str):
1081 1139 # --- filename \t|space stuff
1082 1140 s = str[4:].rstrip('\r\n')
1083 1141 i = s.find('\t')
1084 1142 if i < 0:
1085 1143 i = s.find(' ')
1086 1144 if i < 0:
1087 1145 return s
1088 1146 return s[:i]
1089 1147
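# Example usage (an illustrative, doctest-style addition, not in the original
# source):
#     >>> parsefilename('--- a/foo.c\t2015-01-01 00:00:00')
#     'a/foo.c'
#     >>> parsefilename('+++ b/foo.c')
#     'b/foo.c'
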
1090 1148 def pathtransform(path, strip, prefix):
1091 1149 '''turn a path from a patch into a path suitable for the repository
1092 1150
1093 1151 prefix, if not empty, is expected to be normalized with a / at the end.
1094 1152
1095 1153 Returns (stripped components, path in repository).
1096 1154
1097 1155 >>> pathtransform('a/b/c', 0, '')
1098 1156 ('', 'a/b/c')
1099 1157 >>> pathtransform(' a/b/c ', 0, '')
1100 1158 ('', ' a/b/c')
1101 1159 >>> pathtransform(' a/b/c ', 2, '')
1102 1160 ('a/b/', 'c')
1103 1161 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1104 1162 ('a//b/', 'd/e/c')
1105 1163 >>> pathtransform('a/b/c', 3, '')
1106 1164 Traceback (most recent call last):
1107 1165 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1108 1166 '''
1109 1167 pathlen = len(path)
1110 1168 i = 0
1111 1169 if strip == 0:
1112 1170 return '', path.rstrip()
1113 1171 count = strip
1114 1172 while count > 0:
1115 1173 i = path.find('/', i)
1116 1174 if i == -1:
1117 1175 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1118 1176 (count, strip, path))
1119 1177 i += 1
1120 1178 # consume '//' in the path
1121 1179 while i < pathlen - 1 and path[i] == '/':
1122 1180 i += 1
1123 1181 count -= 1
1124 1182 return path[:i].lstrip(), prefix + path[i:].rstrip()
1125 1183
1126 1184 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1127 1185 nulla = afile_orig == "/dev/null"
1128 1186 nullb = bfile_orig == "/dev/null"
1129 1187 create = nulla and hunk.starta == 0 and hunk.lena == 0
1130 1188 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1131 1189 abase, afile = pathtransform(afile_orig, strip, prefix)
1132 1190 gooda = not nulla and backend.exists(afile)
1133 1191 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1134 1192 if afile == bfile:
1135 1193 goodb = gooda
1136 1194 else:
1137 1195 goodb = not nullb and backend.exists(bfile)
1138 1196 missing = not goodb and not gooda and not create
1139 1197
1140 1198 # some diff programs apparently produce patches where the afile is
1141 1199 # not /dev/null, but afile starts with bfile
1142 1200 abasedir = afile[:afile.rfind('/') + 1]
1143 1201 bbasedir = bfile[:bfile.rfind('/') + 1]
1144 1202 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1145 1203 and hunk.starta == 0 and hunk.lena == 0):
1146 1204 create = True
1147 1205 missing = False
1148 1206
1149 1207 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1150 1208 # diff is between a file and its backup. In this case, the original
1151 1209 # file should be patched (see original mpatch code).
1152 1210 isbackup = (abase == bbase and bfile.startswith(afile))
1153 1211 fname = None
1154 1212 if not missing:
1155 1213 if gooda and goodb:
1156 1214 fname = isbackup and afile or bfile
1157 1215 elif gooda:
1158 1216 fname = afile
1159 1217
1160 1218 if not fname:
1161 1219 if not nullb:
1162 1220 fname = isbackup and afile or bfile
1163 1221 elif not nulla:
1164 1222 fname = afile
1165 1223 else:
1166 1224 raise PatchError(_("undefined source and destination files"))
1167 1225
1168 1226 gp = patchmeta(fname)
1169 1227 if create:
1170 1228 gp.op = 'ADD'
1171 1229 elif remove:
1172 1230 gp.op = 'DELETE'
1173 1231 return gp
1174 1232
1175 1233 def scangitpatch(lr, firstline):
1176 1234 """
1177 1235 Git patches can emit:
1178 1236 - rename a to b
1179 1237 - change b
1180 1238 - copy a to c
1181 1239 - change c
1182 1240
1183 1241     We cannot apply this sequence as-is: the renamed 'a' could not be
1184 1242     found because it would have been renamed already. And we cannot copy
1185 1243     from 'b' instead because 'b' would have been changed already. So
1186 1244 we scan the git patch for copy and rename commands so we can
1187 1245 perform the copies ahead of time.
1188 1246 """
1189 1247 pos = 0
1190 1248 try:
1191 1249 pos = lr.fp.tell()
1192 1250 fp = lr.fp
1193 1251 except IOError:
1194 1252 fp = cStringIO.StringIO(lr.fp.read())
1195 1253 gitlr = linereader(fp)
1196 1254 gitlr.push(firstline)
1197 1255 gitpatches = readgitpatch(gitlr)
1198 1256 fp.seek(pos)
1199 1257 return gitpatches
1200 1258
1201 1259 def iterhunks(fp):
1202 1260 """Read a patch and yield the following events:
1203 1261 - ("file", afile, bfile, firsthunk): select a new target file.
1204 1262 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1205 1263 "file" event.
1206 1264 - ("git", gitchanges): current diff is in git format, gitchanges
1207 1265 maps filenames to gitpatch records. Unique event.
1208 1266 """
1209 1267 afile = ""
1210 1268 bfile = ""
1211 1269 state = None
1212 1270 hunknum = 0
1213 1271 emitfile = newfile = False
1214 1272 gitpatches = None
1215 1273
1216 1274 # our states
1217 1275 BFILE = 1
1218 1276 context = None
1219 1277 lr = linereader(fp)
1220 1278
1221 1279 while True:
1222 1280 x = lr.readline()
1223 1281 if not x:
1224 1282 break
1225 1283 if state == BFILE and (
1226 1284 (not context and x[0] == '@')
1227 1285 or (context is not False and x.startswith('***************'))
1228 1286 or x.startswith('GIT binary patch')):
1229 1287 gp = None
1230 1288 if (gitpatches and
1231 1289 gitpatches[-1].ispatching(afile, bfile)):
1232 1290 gp = gitpatches.pop()
1233 1291 if x.startswith('GIT binary patch'):
1234 1292 h = binhunk(lr, gp.path)
1235 1293 else:
1236 1294 if context is None and x.startswith('***************'):
1237 1295 context = True
1238 1296 h = hunk(x, hunknum + 1, lr, context)
1239 1297 hunknum += 1
1240 1298 if emitfile:
1241 1299 emitfile = False
1242 1300 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1243 1301 yield 'hunk', h
1244 1302 elif x.startswith('diff --git a/'):
1245 1303 m = gitre.match(x.rstrip(' \r\n'))
1246 1304 if not m:
1247 1305 continue
1248 1306 if gitpatches is None:
1249 1307 # scan whole input for git metadata
1250 1308 gitpatches = scangitpatch(lr, x)
1251 1309 yield 'git', [g.copy() for g in gitpatches
1252 1310 if g.op in ('COPY', 'RENAME')]
1253 1311 gitpatches.reverse()
1254 1312 afile = 'a/' + m.group(1)
1255 1313 bfile = 'b/' + m.group(2)
1256 1314 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1257 1315 gp = gitpatches.pop()
1258 1316 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1259 1317 if not gitpatches:
1260 1318 raise PatchError(_('failed to synchronize metadata for "%s"')
1261 1319 % afile[2:])
1262 1320 gp = gitpatches[-1]
1263 1321 newfile = True
1264 1322 elif x.startswith('---'):
1265 1323 # check for a unified diff
1266 1324 l2 = lr.readline()
1267 1325 if not l2.startswith('+++'):
1268 1326 lr.push(l2)
1269 1327 continue
1270 1328 newfile = True
1271 1329 context = False
1272 1330 afile = parsefilename(x)
1273 1331 bfile = parsefilename(l2)
1274 1332 elif x.startswith('***'):
1275 1333 # check for a context diff
1276 1334 l2 = lr.readline()
1277 1335 if not l2.startswith('---'):
1278 1336 lr.push(l2)
1279 1337 continue
1280 1338 l3 = lr.readline()
1281 1339 lr.push(l3)
1282 1340 if not l3.startswith("***************"):
1283 1341 lr.push(l2)
1284 1342 continue
1285 1343 newfile = True
1286 1344 context = True
1287 1345 afile = parsefilename(x)
1288 1346 bfile = parsefilename(l2)
1289 1347
1290 1348 if newfile:
1291 1349 newfile = False
1292 1350 emitfile = True
1293 1351 state = BFILE
1294 1352 hunknum = 0
1295 1353
1296 1354 while gitpatches:
1297 1355 gp = gitpatches.pop()
1298 1356 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1299 1357
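# Illustrative sketch (not part of the original module): for a minimal
# unified diff such as
#
#     --- a/x
#     +++ b/x
#     @@ -1,1 +1,1 @@
#     -old
#     +new
#
# iterhunks() would yield ('file', ('a/x', 'b/x', <hunk>, None)) followed by
# ('hunk', <hunk>); git-style diffs additionally yield a single 'git' event
# carrying the COPY/RENAME patchmeta entries collected by scangitpatch().
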
1300 1358 def applybindelta(binchunk, data):
1301 1359 """Apply a binary delta hunk
1302 1360 The algorithm used is the algorithm from git's patch-delta.c
1303 1361 """
1304 1362 def deltahead(binchunk):
1305 1363 i = 0
1306 1364 for c in binchunk:
1307 1365 i += 1
1308 1366 if not (ord(c) & 0x80):
1309 1367 return i
1310 1368 return i
1311 1369 out = ""
1312 1370 s = deltahead(binchunk)
1313 1371 binchunk = binchunk[s:]
1314 1372 s = deltahead(binchunk)
1315 1373 binchunk = binchunk[s:]
1316 1374 i = 0
1317 1375 while i < len(binchunk):
1318 1376 cmd = ord(binchunk[i])
1319 1377 i += 1
1320 1378 if (cmd & 0x80):
1321 1379 offset = 0
1322 1380 size = 0
1323 1381 if (cmd & 0x01):
1324 1382 offset = ord(binchunk[i])
1325 1383 i += 1
1326 1384 if (cmd & 0x02):
1327 1385 offset |= ord(binchunk[i]) << 8
1328 1386 i += 1
1329 1387 if (cmd & 0x04):
1330 1388 offset |= ord(binchunk[i]) << 16
1331 1389 i += 1
1332 1390 if (cmd & 0x08):
1333 1391 offset |= ord(binchunk[i]) << 24
1334 1392 i += 1
1335 1393 if (cmd & 0x10):
1336 1394 size = ord(binchunk[i])
1337 1395 i += 1
1338 1396 if (cmd & 0x20):
1339 1397 size |= ord(binchunk[i]) << 8
1340 1398 i += 1
1341 1399 if (cmd & 0x40):
1342 1400 size |= ord(binchunk[i]) << 16
1343 1401 i += 1
1344 1402 if size == 0:
1345 1403 size = 0x10000
1346 1404 offset_end = offset + size
1347 1405 out += data[offset:offset_end]
1348 1406 elif cmd != 0:
1349 1407 offset_end = i + cmd
1350 1408 out += binchunk[i:offset_end]
1351 1409 i += cmd
1352 1410 else:
1353 1411 raise PatchError(_('unexpected delta opcode 0'))
1354 1412 return out
1355 1413
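# Worked example (an illustrative addition, not from the original file). The
# delta starts with two varint headers (source length, then result length),
# which deltahead() skips, followed by opcodes: a byte with the high bit set
# copies 'size' bytes from 'offset' in the source (both read from the
# following bytes as selected by the low bits), while a byte 0x01-0x7f
# inserts that many literal bytes.
#     >>> applybindelta('\x00\x05\x05hello', '')
#     'hello'
#     >>> applybindelta('\x0b\x05\x91\x06\x05', 'hello world')
#     'world'
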
1356 1414 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1357 1415 """Reads a patch from fp and tries to apply it.
1358 1416
1359 1417 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1360 1418 there was any fuzz.
1361 1419
1362 1420 If 'eolmode' is 'strict', the patch content and patched file are
1363 1421 read in binary mode. Otherwise, line endings are ignored when
1364 1422     patching and then normalized according to 'eolmode'.
1365 1423 """
1366 1424 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1367 1425 prefix=prefix, eolmode=eolmode)
1368 1426
1369 1427 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1370 1428 eolmode='strict'):
1371 1429
1372 1430 if prefix:
1373 1431 # clean up double slashes, lack of trailing slashes, etc
1374 1432 prefix = util.normpath(prefix) + '/'
1375 1433 def pstrip(p):
1376 1434 return pathtransform(p, strip - 1, prefix)[1]
1377 1435
1378 1436 rejects = 0
1379 1437 err = 0
1380 1438 current_file = None
1381 1439
1382 1440 for state, values in iterhunks(fp):
1383 1441 if state == 'hunk':
1384 1442 if not current_file:
1385 1443 continue
1386 1444 ret = current_file.apply(values)
1387 1445 if ret > 0:
1388 1446 err = 1
1389 1447 elif state == 'file':
1390 1448 if current_file:
1391 1449 rejects += current_file.close()
1392 1450 current_file = None
1393 1451 afile, bfile, first_hunk, gp = values
1394 1452 if gp:
1395 1453 gp.path = pstrip(gp.path)
1396 1454 if gp.oldpath:
1397 1455 gp.oldpath = pstrip(gp.oldpath)
1398 1456 else:
1399 1457 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1400 1458 prefix)
1401 1459 if gp.op == 'RENAME':
1402 1460 backend.unlink(gp.oldpath)
1403 1461 if not first_hunk:
1404 1462 if gp.op == 'DELETE':
1405 1463 backend.unlink(gp.path)
1406 1464 continue
1407 1465 data, mode = None, None
1408 1466 if gp.op in ('RENAME', 'COPY'):
1409 1467 data, mode = store.getfile(gp.oldpath)[:2]
1410 1468 # FIXME: failing getfile has never been handled here
1411 1469 assert data is not None
1412 1470 if gp.mode:
1413 1471 mode = gp.mode
1414 1472 if gp.op == 'ADD':
1415 1473 # Added files without content have no hunk and
1416 1474 # must be created
1417 1475 data = ''
1418 1476 if data or mode:
1419 1477 if (gp.op in ('ADD', 'RENAME', 'COPY')
1420 1478 and backend.exists(gp.path)):
1421 1479 raise PatchError(_("cannot create %s: destination "
1422 1480 "already exists") % gp.path)
1423 1481 backend.setfile(gp.path, data, mode, gp.oldpath)
1424 1482 continue
1425 1483 try:
1426 1484 current_file = patcher(ui, gp, backend, store,
1427 1485 eolmode=eolmode)
1428 1486 except PatchError, inst:
1429 1487 ui.warn(str(inst) + '\n')
1430 1488 current_file = None
1431 1489 rejects += 1
1432 1490 continue
1433 1491 elif state == 'git':
1434 1492 for gp in values:
1435 1493 path = pstrip(gp.oldpath)
1436 1494 data, mode = backend.getfile(path)
1437 1495 if data is None:
1438 1496 # The error ignored here will trigger a getfile()
1439 1497 # error in a place more appropriate for error
1440 1498 # handling, and will not interrupt the patching
1441 1499 # process.
1442 1500 pass
1443 1501 else:
1444 1502 store.setfile(path, data, mode)
1445 1503 else:
1446 1504 raise util.Abort(_('unsupported parser state: %s') % state)
1447 1505
1448 1506 if current_file:
1449 1507 rejects += current_file.close()
1450 1508
1451 1509 if rejects:
1452 1510 return -1
1453 1511 return err
1454 1512
1455 1513 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1456 1514 similarity):
1457 1515 """use <patcher> to apply <patchname> to the working directory.
1458 1516 returns whether patch was applied with fuzz factor."""
1459 1517
1460 1518 fuzz = False
1461 1519 args = []
1462 1520 cwd = repo.root
1463 1521 if cwd:
1464 1522 args.append('-d %s' % util.shellquote(cwd))
1465 1523 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1466 1524 util.shellquote(patchname)))
1467 1525 try:
1468 1526 for line in fp:
1469 1527 line = line.rstrip()
1470 1528 ui.note(line + '\n')
1471 1529 if line.startswith('patching file '):
1472 1530 pf = util.parsepatchoutput(line)
1473 1531 printed_file = False
1474 1532 files.add(pf)
1475 1533 elif line.find('with fuzz') >= 0:
1476 1534 fuzz = True
1477 1535 if not printed_file:
1478 1536 ui.warn(pf + '\n')
1479 1537 printed_file = True
1480 1538 ui.warn(line + '\n')
1481 1539 elif line.find('saving rejects to file') >= 0:
1482 1540 ui.warn(line + '\n')
1483 1541 elif line.find('FAILED') >= 0:
1484 1542 if not printed_file:
1485 1543 ui.warn(pf + '\n')
1486 1544 printed_file = True
1487 1545 ui.warn(line + '\n')
1488 1546 finally:
1489 1547 if files:
1490 1548 scmutil.marktouched(repo, files, similarity)
1491 1549 code = fp.close()
1492 1550 if code:
1493 1551 raise PatchError(_("patch command failed: %s") %
1494 1552 util.explainexit(code)[0])
1495 1553 return fuzz
1496 1554
1497 1555 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1498 1556 eolmode='strict'):
1499 1557 if files is None:
1500 1558 files = set()
1501 1559 if eolmode is None:
1502 1560 eolmode = ui.config('patch', 'eol', 'strict')
1503 1561 if eolmode.lower() not in eolmodes:
1504 1562 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1505 1563 eolmode = eolmode.lower()
1506 1564
1507 1565 store = filestore()
1508 1566 try:
1509 1567 fp = open(patchobj, 'rb')
1510 1568 except TypeError:
1511 1569 fp = patchobj
1512 1570 try:
1513 1571 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1514 1572 eolmode=eolmode)
1515 1573 finally:
1516 1574 if fp != patchobj:
1517 1575 fp.close()
1518 1576 files.update(backend.close())
1519 1577 store.close()
1520 1578 if ret < 0:
1521 1579 raise PatchError(_('patch failed to apply'))
1522 1580 return ret > 0
1523 1581
1524 1582 def internalpatch(ui, repo, patchobj, strip, prefix, files=None,
1525 1583 eolmode='strict', similarity=0):
1526 1584 """use builtin patch to apply <patchobj> to the working directory.
1527 1585 returns whether patch was applied with fuzz factor."""
1528 1586 backend = workingbackend(ui, repo, similarity)
1529 1587 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1530 1588
1531 1589 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1532 1590 eolmode='strict'):
1533 1591 backend = repobackend(ui, repo, ctx, store)
1534 1592 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1535 1593
1536 1594 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1537 1595 similarity=0):
1538 1596 """Apply <patchname> to the working directory.
1539 1597
1540 1598 'eolmode' specifies how end of lines should be handled. It can be:
1541 1599 - 'strict': inputs are read in binary mode, EOLs are preserved
1542 1600 - 'crlf': EOLs are ignored when patching and reset to CRLF
1543 1601 - 'lf': EOLs are ignored when patching and reset to LF
1544 1602 - None: get it from user settings, default to 'strict'
1545 1603 'eolmode' is ignored when using an external patcher program.
1546 1604
1547 1605 Returns whether patch was applied with fuzz factor.
1548 1606 """
1549 1607 patcher = ui.config('ui', 'patch')
1550 1608 if files is None:
1551 1609 files = set()
1552 1610 if patcher:
1553 1611 return _externalpatch(ui, repo, patcher, patchname, strip,
1554 1612 files, similarity)
1555 1613 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1556 1614 similarity)
1557 1615
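# Minimal usage sketch (illustrative; 'ui', 'repo' and the patch file name
# are assumptions, not from the original source):
#     fuzz = patch(ui, repo, 'fix.diff', strip=1)
#     if fuzz:
#         ui.warn('patch applied with fuzz\n')
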
1558 1616 def changedfiles(ui, repo, patchpath, strip=1):
1559 1617 backend = fsbackend(ui, repo.root)
1560 1618 fp = open(patchpath, 'rb')
1561 1619 try:
1562 1620 changed = set()
1563 1621 for state, values in iterhunks(fp):
1564 1622 if state == 'file':
1565 1623 afile, bfile, first_hunk, gp = values
1566 1624 if gp:
1567 1625 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1568 1626 if gp.oldpath:
1569 1627 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1570 1628 else:
1571 1629 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1572 1630 '')
1573 1631 changed.add(gp.path)
1574 1632 if gp.op == 'RENAME':
1575 1633 changed.add(gp.oldpath)
1576 1634 elif state not in ('hunk', 'git'):
1577 1635 raise util.Abort(_('unsupported parser state: %s') % state)
1578 1636 return changed
1579 1637 finally:
1580 1638 fp.close()
1581 1639
1582 1640 class GitDiffRequired(Exception):
1583 1641 pass
1584 1642
1585 1643 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1586 1644 '''return diffopts with all features supported and parsed'''
1587 1645 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1588 1646 git=True, whitespace=True, formatchanging=True)
1589 1647
1590 1648 diffopts = diffallopts
1591 1649
1592 1650 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
1593 1651 whitespace=False, formatchanging=False):
1594 1652 '''return diffopts with only opted-in features parsed
1595 1653
1596 1654 Features:
1597 1655 - git: git-style diffs
1598 1656 - whitespace: whitespace options like ignoreblanklines and ignorews
1599 1657 - formatchanging: options that will likely break or cause correctness issues
1600 1658 with most diff parsers
1601 1659 '''
1602 1660 def get(key, name=None, getter=ui.configbool, forceplain=None):
1603 1661 if opts:
1604 1662 v = opts.get(key)
1605 1663 if v:
1606 1664 return v
1607 1665 if forceplain is not None and ui.plain():
1608 1666 return forceplain
1609 1667 return getter(section, name or key, None, untrusted=untrusted)
1610 1668
1611 1669 # core options, expected to be understood by every diff parser
1612 1670 buildopts = {
1613 1671 'nodates': get('nodates'),
1614 1672 'showfunc': get('show_function', 'showfunc'),
1615 1673 'context': get('unified', getter=ui.config),
1616 1674 }
1617 1675
1618 1676 if git:
1619 1677 buildopts['git'] = get('git')
1620 1678 if whitespace:
1621 1679 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
1622 1680 buildopts['ignorewsamount'] = get('ignore_space_change',
1623 1681 'ignorewsamount')
1624 1682 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
1625 1683 'ignoreblanklines')
1626 1684 if formatchanging:
1627 1685 buildopts['text'] = opts and opts.get('text')
1628 1686 buildopts['nobinary'] = get('nobinary')
1629 1687 buildopts['noprefix'] = get('noprefix', forceplain=False)
1630 1688
1631 1689 return mdiff.diffopts(**buildopts)
1632 1690
1633 1691 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1634 1692 losedatafn=None, prefix=''):
1635 1693 '''yields diff of changes to files between two nodes, or node and
1636 1694 working directory.
1637 1695
1638 1696 if node1 is None, use first dirstate parent instead.
1639 1697 if node2 is None, compare node1 with working directory.
1640 1698
1641 1699     losedatafn(**kwargs) is a callable run when opts.upgrade=True and
1642 1700 every time some change cannot be represented with the current
1643 1701 patch format. Return False to upgrade to git patch format, True to
1644 1702 accept the loss or raise an exception to abort the diff. It is
1645 1703     called with the name of the current file being diffed as 'fn'. If set
1646 1704 to None, patches will always be upgraded to git format when
1647 1705 necessary.
1648 1706
1649 1707 prefix is a filename prefix that is prepended to all filenames on
1650 1708 display (used for subrepos).
1651 1709 '''
1652 1710
1653 1711 if opts is None:
1654 1712 opts = mdiff.defaultopts
1655 1713
1656 1714 if not node1 and not node2:
1657 1715 node1 = repo.dirstate.p1()
1658 1716
1659 1717 def lrugetfilectx():
1660 1718 cache = {}
1661 1719 order = util.deque()
1662 1720 def getfilectx(f, ctx):
1663 1721 fctx = ctx.filectx(f, filelog=cache.get(f))
1664 1722 if f not in cache:
1665 1723 if len(cache) > 20:
1666 1724 del cache[order.popleft()]
1667 1725 cache[f] = fctx.filelog()
1668 1726 else:
1669 1727 order.remove(f)
1670 1728 order.append(f)
1671 1729 return fctx
1672 1730 return getfilectx
1673 1731 getfilectx = lrugetfilectx()
1674 1732
1675 1733 ctx1 = repo[node1]
1676 1734 ctx2 = repo[node2]
1677 1735
1678 1736 if not changes:
1679 1737 changes = repo.status(ctx1, ctx2, match=match)
1680 1738 modified, added, removed = changes[:3]
1681 1739
1682 1740 if not modified and not added and not removed:
1683 1741 return []
1684 1742
1685 1743 hexfunc = repo.ui.debugflag and hex or short
1686 1744 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
1687 1745
1688 1746 copy = {}
1689 1747 if opts.git or opts.upgrade:
1690 1748 copy = copies.pathcopies(ctx1, ctx2)
1691 1749
1692 1750 def difffn(opts, losedata):
1693 1751 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1694 1752 copy, getfilectx, opts, losedata, prefix)
1695 1753 if opts.upgrade and not opts.git:
1696 1754 try:
1697 1755 def losedata(fn):
1698 1756 if not losedatafn or not losedatafn(fn=fn):
1699 1757 raise GitDiffRequired
1700 1758 # Buffer the whole output until we are sure it can be generated
1701 1759 return list(difffn(opts.copy(git=False), losedata))
1702 1760 except GitDiffRequired:
1703 1761 return difffn(opts.copy(git=True), None)
1704 1762 else:
1705 1763 return difffn(opts, None)
1706 1764
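# Minimal usage sketch (illustrative; 'ui' and 'repo' are assumed to exist):
#     opts = difffeatureopts(ui, git=True)
#     for chunk in diff(repo, node1=None, node2=None, opts=opts):
#         ui.write(chunk)
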
1707 1765 def difflabel(func, *args, **kw):
1708 1766 '''yields 2-tuples of (output, label) based on the output of func()'''
1709 1767 headprefixes = [('diff', 'diff.diffline'),
1710 1768 ('copy', 'diff.extended'),
1711 1769 ('rename', 'diff.extended'),
1712 1770 ('old', 'diff.extended'),
1713 1771 ('new', 'diff.extended'),
1714 1772 ('deleted', 'diff.extended'),
1715 1773 ('---', 'diff.file_a'),
1716 1774 ('+++', 'diff.file_b')]
1717 1775 textprefixes = [('@', 'diff.hunk'),
1718 1776 ('-', 'diff.deleted'),
1719 1777 ('+', 'diff.inserted')]
1720 1778 head = False
1721 1779 for chunk in func(*args, **kw):
1722 1780 lines = chunk.split('\n')
1723 1781 for i, line in enumerate(lines):
1724 1782 if i != 0:
1725 1783 yield ('\n', '')
1726 1784 if head:
1727 1785 if line.startswith('@'):
1728 1786 head = False
1729 1787 else:
1730 1788 if line and line[0] not in ' +-@\\':
1731 1789 head = True
1732 1790 stripline = line
1733 1791 diffline = False
1734 1792 if not head and line and line[0] in '+-':
1735 1793 # highlight tabs and trailing whitespace, but only in
1736 1794 # changed lines
1737 1795 stripline = line.rstrip()
1738 1796 diffline = True
1739 1797
1740 1798 prefixes = textprefixes
1741 1799 if head:
1742 1800 prefixes = headprefixes
1743 1801 for prefix, label in prefixes:
1744 1802 if stripline.startswith(prefix):
1745 1803 if diffline:
1746 1804 for token in tabsplitter.findall(stripline):
1747 1805 if '\t' == token[0]:
1748 1806 yield (token, 'diff.tab')
1749 1807 else:
1750 1808 yield (token, label)
1751 1809 else:
1752 1810 yield (stripline, label)
1753 1811 break
1754 1812 else:
1755 1813 yield (line, '')
1756 1814 if line != stripline:
1757 1815 yield (line[len(stripline):], 'diff.trailingwhitespace')
1758 1816
1759 1817 def diffui(*args, **kw):
1760 1818 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1761 1819 return difflabel(diff, *args, **kw)
1762 1820
1763 1821 def _filepairs(ctx1, modified, added, removed, copy, opts):
1764 1822 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1765 1823 before and f2 is the the name after. For added files, f1 will be None,
1766 1824     before and f2 is the name after. For added files, f1 will be None,
1767 1825 or 'rename' (the latter two only if opts.git is set).'''
1768 1826 gone = set()
1769 1827
1770 1828 copyto = dict([(v, k) for k, v in copy.items()])
1771 1829
1772 1830 addedset, removedset = set(added), set(removed)
1773 1831 # Fix up added, since merged-in additions appear as
1774 1832 # modifications during merges
1775 1833 for f in modified:
1776 1834 if f not in ctx1:
1777 1835 addedset.add(f)
1778 1836
1779 1837 for f in sorted(modified + added + removed):
1780 1838 copyop = None
1781 1839 f1, f2 = f, f
1782 1840 if f in addedset:
1783 1841 f1 = None
1784 1842 if f in copy:
1785 1843 if opts.git:
1786 1844 f1 = copy[f]
1787 1845 if f1 in removedset and f1 not in gone:
1788 1846 copyop = 'rename'
1789 1847 gone.add(f1)
1790 1848 else:
1791 1849 copyop = 'copy'
1792 1850 elif f in removedset:
1793 1851 f2 = None
1794 1852 if opts.git:
1795 1853 # have we already reported a copy above?
1796 1854 if (f in copyto and copyto[f] in addedset
1797 1855 and copy[copyto[f]] == f):
1798 1856 continue
1799 1857 yield f1, f2, copyop
1800 1858
1801 1859 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1802 1860 copy, getfilectx, opts, losedatafn, prefix):
1803 1861
1804 1862 def gitindex(text):
1805 1863 if not text:
1806 1864 text = ""
1807 1865 l = len(text)
1808 1866 s = util.sha1('blob %d\0' % l)
1809 1867 s.update(text)
1810 1868 return s.hexdigest()
1811 1869
1812 1870 if opts.noprefix:
1813 1871 aprefix = bprefix = ''
1814 1872 else:
1815 1873 aprefix = 'a/'
1816 1874 bprefix = 'b/'
1817 1875
1818 1876 def diffline(f, revs):
1819 1877 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1820 1878 return 'diff %s %s' % (revinfo, f)
1821 1879
1822 1880 date1 = util.datestr(ctx1.date())
1823 1881 date2 = util.datestr(ctx2.date())
1824 1882
1825 1883 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1826 1884
1827 1885 for f1, f2, copyop in _filepairs(
1828 1886 ctx1, modified, added, removed, copy, opts):
1829 1887 content1 = None
1830 1888 content2 = None
1831 1889 flag1 = None
1832 1890 flag2 = None
1833 1891 if f1:
1834 1892 content1 = getfilectx(f1, ctx1).data()
1835 1893 if opts.git or losedatafn:
1836 1894 flag1 = ctx1.flags(f1)
1837 1895 if f2:
1838 1896 content2 = getfilectx(f2, ctx2).data()
1839 1897 if opts.git or losedatafn:
1840 1898 flag2 = ctx2.flags(f2)
1841 1899 binary = False
1842 1900 if opts.git or losedatafn:
1843 1901 binary = util.binary(content1) or util.binary(content2)
1844 1902
1845 1903 if losedatafn and not opts.git:
1846 1904 if (binary or
1847 1905 # copy/rename
1848 1906 f2 in copy or
1849 1907 # empty file creation
1850 1908 (not f1 and not content2) or
1851 1909 # empty file deletion
1852 1910 (not content1 and not f2) or
1853 1911 # create with flags
1854 1912 (not f1 and flag2) or
1855 1913 # change flags
1856 1914 (f1 and f2 and flag1 != flag2)):
1857 1915 losedatafn(f2 or f1)
1858 1916
1859 1917 path1 = posixpath.join(prefix, f1 or f2)
1860 1918 path2 = posixpath.join(prefix, f2 or f1)
1861 1919 header = []
1862 1920 if opts.git:
1863 1921 header.append('diff --git %s%s %s%s' %
1864 1922 (aprefix, path1, bprefix, path2))
1865 1923 if not f1: # added
1866 1924 header.append('new file mode %s' % gitmode[flag2])
1867 1925 elif not f2: # removed
1868 1926 header.append('deleted file mode %s' % gitmode[flag1])
1869 1927 else: # modified/copied/renamed
1870 1928 mode1, mode2 = gitmode[flag1], gitmode[flag2]
1871 1929 if mode1 != mode2:
1872 1930 header.append('old mode %s' % mode1)
1873 1931 header.append('new mode %s' % mode2)
1874 1932 if copyop is not None:
1875 1933 header.append('%s from %s' % (copyop, path1))
1876 1934 header.append('%s to %s' % (copyop, path2))
1877 1935 elif revs and not repo.ui.quiet:
1878 1936 header.append(diffline(path1, revs))
1879 1937
1880 1938 if binary and opts.git and not opts.nobinary:
1881 1939 text = mdiff.b85diff(content1, content2)
1882 1940 if text:
1883 1941 header.append('index %s..%s' %
1884 1942 (gitindex(content1), gitindex(content2)))
1885 1943 else:
1886 1944 text = mdiff.unidiff(content1, date1,
1887 1945 content2, date2,
1888 1946 path1, path2, opts=opts)
1889 1947 if header and (text or len(header) > 1):
1890 1948 yield '\n'.join(header) + '\n'
1891 1949 if text:
1892 1950 yield text
1893 1951
1894 1952 def diffstatsum(stats):
1895 1953 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1896 1954 for f, a, r, b in stats:
1897 1955 maxfile = max(maxfile, encoding.colwidth(f))
1898 1956 maxtotal = max(maxtotal, a + r)
1899 1957 addtotal += a
1900 1958 removetotal += r
1901 1959 binary = binary or b
1902 1960
1903 1961 return maxfile, maxtotal, addtotal, removetotal, binary
1904 1962
1905 1963 def diffstatdata(lines):
1906 1964 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1907 1965
1908 1966 results = []
1909 1967 filename, adds, removes, isbinary = None, 0, 0, False
1910 1968
1911 1969 def addresult():
1912 1970 if filename:
1913 1971 results.append((filename, adds, removes, isbinary))
1914 1972
1915 1973 for line in lines:
1916 1974 if line.startswith('diff'):
1917 1975 addresult()
1918 1976             # set numbers to 0 anyway when starting a new file
1919 1977 adds, removes, isbinary = 0, 0, False
1920 1978 if line.startswith('diff --git a/'):
1921 1979 filename = gitre.search(line).group(2)
1922 1980 elif line.startswith('diff -r'):
1923 1981 # format: "diff -r ... -r ... filename"
1924 1982 filename = diffre.search(line).group(1)
1925 1983 elif line.startswith('+') and not line.startswith('+++ '):
1926 1984 adds += 1
1927 1985 elif line.startswith('-') and not line.startswith('--- '):
1928 1986 removes += 1
1929 1987 elif (line.startswith('GIT binary patch') or
1930 1988 line.startswith('Binary file')):
1931 1989 isbinary = True
1932 1990 addresult()
1933 1991 return results
1934 1992
1935 1993 def diffstat(lines, width=80, git=False):
1936 1994 output = []
1937 1995 stats = diffstatdata(lines)
1938 1996 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1939 1997
1940 1998 countwidth = len(str(maxtotal))
1941 1999 if hasbinary and countwidth < 3:
1942 2000 countwidth = 3
1943 2001 graphwidth = width - countwidth - maxname - 6
1944 2002 if graphwidth < 10:
1945 2003 graphwidth = 10
1946 2004
1947 2005 def scale(i):
1948 2006 if maxtotal <= graphwidth:
1949 2007 return i
1950 2008 # If diffstat runs out of room it doesn't print anything,
1951 2009 # which isn't very useful, so always print at least one + or -
1952 2010 # if there were at least some changes.
1953 2011 return max(i * graphwidth // maxtotal, int(bool(i)))
1954 2012
1955 2013 for filename, adds, removes, isbinary in stats:
1956 2014 if isbinary:
1957 2015 count = 'Bin'
1958 2016 else:
1959 2017 count = adds + removes
1960 2018 pluses = '+' * scale(adds)
1961 2019 minuses = '-' * scale(removes)
1962 2020 output.append(' %s%s | %*s %s%s\n' %
1963 2021 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1964 2022 countwidth, count, pluses, minuses))
1965 2023
1966 2024 if stats:
1967 2025 output.append(_(' %d files changed, %d insertions(+), '
1968 2026 '%d deletions(-)\n')
1969 2027 % (len(stats), totaladds, totalremoves))
1970 2028
1971 2029 return ''.join(output)
1972 2030
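# Illustrative example (not from the original file): for a single text file
# with two added and two removed lines, diffstat() would return something like
#
#      foo.c | 4 ++--
#      1 files changed, 2 insertions(+), 2 deletions(-)
#
# where the +/- graph is scaled down via scale() when the widest file would
# not fit in 'width' columns.
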
1973 2031 def diffstatui(*args, **kw):
1974 2032 '''like diffstat(), but yields 2-tuples of (output, label) for
1975 2033 ui.write()
1976 2034 '''
1977 2035
1978 2036 for line in diffstat(*args, **kw).splitlines():
1979 2037 if line and line[-1] in '+-':
1980 2038 name, graph = line.rsplit(' ', 1)
1981 2039 yield (name + ' ', '')
1982 2040 m = re.search(r'\++', graph)
1983 2041 if m:
1984 2042 yield (m.group(0), 'diffstat.inserted')
1985 2043 m = re.search(r'-+', graph)
1986 2044 if m:
1987 2045 yield (m.group(0), 'diffstat.deleted')
1988 2046 else:
1989 2047 yield (line, '')
1990 2048 yield ('\n', '')