diff: don't crash when merged-in addition is copied...
Martin von Zweigbergk
r27902:51b6ce25 default
@@ -1,2587 +1,2592 @@
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import cStringIO
12 12 import collections
13 13 import copy
14 14 import email
15 15 import errno
16 16 import os
17 17 import posixpath
18 18 import re
19 19 import shutil
20 20 import tempfile
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 base85,
30 30 copies,
31 31 diffhelpers,
32 32 encoding,
33 33 error,
34 34 mdiff,
35 35 pathutil,
36 36 scmutil,
37 37 util,
38 38 )
39 39
40 40 gitre = re.compile('diff --git a/(.*) b/(.*)')
41 41 tabsplitter = re.compile(r'(\t+|[^\t]+)')
42 42
43 43 class PatchError(Exception):
44 44 pass
45 45
46 46
47 47 # public functions
48 48
49 49 def split(stream):
50 50 '''return an iterator of individual patches from a stream'''
51 51 def isheader(line, inheader):
52 52 if inheader and line[0] in (' ', '\t'):
53 53 # continuation
54 54 return True
55 55 if line[0] in (' ', '-', '+'):
56 56 # diff line - don't check for header pattern in there
57 57 return False
58 58 l = line.split(': ', 1)
59 59 return len(l) == 2 and ' ' not in l[0]
60 60
61 61 def chunk(lines):
62 62 return cStringIO.StringIO(''.join(lines))
63 63
64 64 def hgsplit(stream, cur):
65 65 inheader = True
66 66
67 67 for line in stream:
68 68 if not line.strip():
69 69 inheader = False
70 70 if not inheader and line.startswith('# HG changeset patch'):
71 71 yield chunk(cur)
72 72 cur = []
73 73 inheader = True
74 74
75 75 cur.append(line)
76 76
77 77 if cur:
78 78 yield chunk(cur)
79 79
80 80 def mboxsplit(stream, cur):
81 81 for line in stream:
82 82 if line.startswith('From '):
83 83 for c in split(chunk(cur[1:])):
84 84 yield c
85 85 cur = []
86 86
87 87 cur.append(line)
88 88
89 89 if cur:
90 90 for c in split(chunk(cur[1:])):
91 91 yield c
92 92
93 93 def mimesplit(stream, cur):
94 94 def msgfp(m):
95 95 fp = cStringIO.StringIO()
96 96 g = email.Generator.Generator(fp, mangle_from_=False)
97 97 g.flatten(m)
98 98 fp.seek(0)
99 99 return fp
100 100
101 101 for line in stream:
102 102 cur.append(line)
103 103 c = chunk(cur)
104 104
105 105 m = email.Parser.Parser().parse(c)
106 106 if not m.is_multipart():
107 107 yield msgfp(m)
108 108 else:
109 109 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
110 110 for part in m.walk():
111 111 ct = part.get_content_type()
112 112 if ct not in ok_types:
113 113 continue
114 114 yield msgfp(part)
115 115
116 116 def headersplit(stream, cur):
117 117 inheader = False
118 118
119 119 for line in stream:
120 120 if not inheader and isheader(line, inheader):
121 121 yield chunk(cur)
122 122 cur = []
123 123 inheader = True
124 124 if inheader and not isheader(line, inheader):
125 125 inheader = False
126 126
127 127 cur.append(line)
128 128
129 129 if cur:
130 130 yield chunk(cur)
131 131
132 132 def remainder(cur):
133 133 yield chunk(cur)
134 134
135 135 class fiter(object):
136 136 def __init__(self, fp):
137 137 self.fp = fp
138 138
139 139 def __iter__(self):
140 140 return self
141 141
142 142 def next(self):
143 143 l = self.fp.readline()
144 144 if not l:
145 145 raise StopIteration
146 146 return l
147 147
148 148 inheader = False
149 149 cur = []
150 150
151 151 mimeheaders = ['content-type']
152 152
153 153 if not util.safehasattr(stream, 'next'):
154 154 # http responses, for example, have readline but not next
155 155 stream = fiter(stream)
156 156
157 157 for line in stream:
158 158 cur.append(line)
159 159 if line.startswith('# HG changeset patch'):
160 160 return hgsplit(stream, cur)
161 161 elif line.startswith('From '):
162 162 return mboxsplit(stream, cur)
163 163 elif isheader(line, inheader):
164 164 inheader = True
165 165 if line.split(':', 1)[0].lower() in mimeheaders:
166 166 # let email parser handle this
167 167 return mimesplit(stream, cur)
168 168 elif line.startswith('--- ') and inheader:
169 169 # No evil headers seen by diff start, split by hand
170 170 return headersplit(stream, cur)
171 171 # Not enough info, keep reading
172 172
173 173 # if we are here, we have a very plain patch
174 174 return remainder(cur)
175 175
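# Illustrative sketch (not part of this changeset): split() yields one
# file-like chunk per patch, so a caller can count or forward each chunk
# independently. The helper name below is hypothetical.
def _example_countpatches(fp):
    """Return the number of individual patches found in the stream fp."""
    return sum(1 for _chunk in split(fp))
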
176 176 ## Some facility for extensible patch parsing:
177 177 # list of pairs ("header to match", "data key")
178 178 patchheadermap = [('Date', 'date'),
179 179 ('Branch', 'branch'),
180 180 ('Node ID', 'nodeid'),
181 181 ]
182 182
183 183 def extract(ui, fileobj):
184 184 '''extract patch from data read from fileobj.
185 185
186 186 patch can be a normal patch or contained in an email message.
187 187
188 188 return a dictionary. Standard keys are:
189 189 - filename,
190 190 - message,
191 191 - user,
192 192 - date,
193 193 - branch,
194 194 - node,
195 195 - p1,
196 196 - p2.
197 197 Any item can be missing from the dictionary. If filename is missing,
198 198 fileobj did not contain a patch. Caller must unlink filename when done.'''
199 199
200 200 # attempt to detect the start of a patch
201 201 # (this heuristic is borrowed from quilt)
202 202 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
203 203 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
204 204 r'---[ \t].*?^\+\+\+[ \t]|'
205 205 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
206 206
207 207 data = {}
208 208 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
209 209 tmpfp = os.fdopen(fd, 'w')
210 210 try:
211 211 msg = email.Parser.Parser().parse(fileobj)
212 212
213 213 subject = msg['Subject']
214 214 data['user'] = msg['From']
215 215 if not subject and not data['user']:
216 216 # Not an email, restore parsed headers if any
217 217 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
218 218
219 219 # should try to parse msg['Date']
220 220 parents = []
221 221
222 222 if subject:
223 223 if subject.startswith('[PATCH'):
224 224 pend = subject.find(']')
225 225 if pend >= 0:
226 226 subject = subject[pend + 1:].lstrip()
227 227 subject = re.sub(r'\n[ \t]+', ' ', subject)
228 228 ui.debug('Subject: %s\n' % subject)
229 229 if data['user']:
230 230 ui.debug('From: %s\n' % data['user'])
231 231 diffs_seen = 0
232 232 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
233 233 message = ''
234 234 for part in msg.walk():
235 235 content_type = part.get_content_type()
236 236 ui.debug('Content-Type: %s\n' % content_type)
237 237 if content_type not in ok_types:
238 238 continue
239 239 payload = part.get_payload(decode=True)
240 240 m = diffre.search(payload)
241 241 if m:
242 242 hgpatch = False
243 243 hgpatchheader = False
244 244 ignoretext = False
245 245
246 246 ui.debug('found patch at byte %d\n' % m.start(0))
247 247 diffs_seen += 1
248 248 cfp = cStringIO.StringIO()
249 249 for line in payload[:m.start(0)].splitlines():
250 250 if line.startswith('# HG changeset patch') and not hgpatch:
251 251 ui.debug('patch generated by hg export\n')
252 252 hgpatch = True
253 253 hgpatchheader = True
254 254 # drop earlier commit message content
255 255 cfp.seek(0)
256 256 cfp.truncate()
257 257 subject = None
258 258 elif hgpatchheader:
259 259 if line.startswith('# User '):
260 260 data['user'] = line[7:]
261 261 ui.debug('From: %s\n' % data['user'])
262 262 elif line.startswith("# Parent "):
263 263 parents.append(line[9:].lstrip())
264 264 elif line.startswith("# "):
265 265 for header, key in patchheadermap:
266 266 prefix = '# %s ' % header
267 267 if line.startswith(prefix):
268 268 data[key] = line[len(prefix):]
269 269 else:
270 270 hgpatchheader = False
271 271 elif line == '---':
272 272 ignoretext = True
273 273 if not hgpatchheader and not ignoretext:
274 274 cfp.write(line)
275 275 cfp.write('\n')
276 276 message = cfp.getvalue()
277 277 if tmpfp:
278 278 tmpfp.write(payload)
279 279 if not payload.endswith('\n'):
280 280 tmpfp.write('\n')
281 281 elif not diffs_seen and message and content_type == 'text/plain':
282 282 message += '\n' + payload
283 283 except: # re-raises
284 284 tmpfp.close()
285 285 os.unlink(tmpname)
286 286 raise
287 287
288 288 if subject and not message.startswith(subject):
289 289 message = '%s\n%s' % (subject, message)
290 290 data['message'] = message
291 291 tmpfp.close()
292 292 if parents:
293 293 data['p1'] = parents.pop(0)
294 294 if parents:
295 295 data['p2'] = parents.pop(0)
296 296
297 297 if diffs_seen:
298 298 data['filename'] = tmpname
299 299 else:
300 300 os.unlink(tmpname)
301 301 return data
302 302
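# Sketch of the caller contract described in extract()'s docstring
# (hypothetical helper, not part of this changeset): when 'filename' is
# present, the caller owns the temporary patch file and must unlink it.
def _example_extractmessage(ui, fileobj):
    data = extract(ui, fileobj)
    try:
        return data.get('message', ''), data.get('user')
    finally:
        tmp = data.get('filename')
        if tmp:
            os.unlink(tmp)
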
303 303 class patchmeta(object):
304 304 """Patched file metadata
305 305
306 306 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
307 307 or COPY. 'path' is patched file path. 'oldpath' is set to the
308 308 origin file when 'op' is either COPY or RENAME, None otherwise. If
309 309 file mode is changed, 'mode' is a tuple (islink, isexec) where
310 310 'islink' is True if the file is a symlink and 'isexec' is True if
311 311 the file is executable. Otherwise, 'mode' is None.
312 312 """
313 313 def __init__(self, path):
314 314 self.path = path
315 315 self.oldpath = None
316 316 self.mode = None
317 317 self.op = 'MODIFY'
318 318 self.binary = False
319 319
320 320 def setmode(self, mode):
321 321 islink = mode & 0o20000
322 322 isexec = mode & 0o100
323 323 self.mode = (islink, isexec)
324 324
325 325 def copy(self):
326 326 other = patchmeta(self.path)
327 327 other.oldpath = self.oldpath
328 328 other.mode = self.mode
329 329 other.op = self.op
330 330 other.binary = self.binary
331 331 return other
332 332
333 333 def _ispatchinga(self, afile):
334 334 if afile == '/dev/null':
335 335 return self.op == 'ADD'
336 336 return afile == 'a/' + (self.oldpath or self.path)
337 337
338 338 def _ispatchingb(self, bfile):
339 339 if bfile == '/dev/null':
340 340 return self.op == 'DELETE'
341 341 return bfile == 'b/' + self.path
342 342
343 343 def ispatching(self, afile, bfile):
344 344 return self._ispatchinga(afile) and self._ispatchingb(bfile)
345 345
346 346 def __repr__(self):
347 347 return "<patchmeta %s %r>" % (self.op, self.path)
348 348
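# Sketch (hypothetical values) of the metadata layout documented in the
# patchmeta docstring: a copied file carries op='COPY', the new 'path', the
# 'oldpath' it was copied from, and a (islink, isexec) tuple via setmode().
def _example_copymeta():
    gp = patchmeta('b.txt')
    gp.op = 'COPY'
    gp.oldpath = 'a.txt'
    gp.setmode(0o100644)  # regular, non-executable file
    return gp
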
349 349 def readgitpatch(lr):
350 350 """extract git-style metadata about patches from <patchname>"""
351 351
352 352 # Filter patch for git information
353 353 gp = None
354 354 gitpatches = []
355 355 for line in lr:
356 356 line = line.rstrip(' \r\n')
357 357 if line.startswith('diff --git a/'):
358 358 m = gitre.match(line)
359 359 if m:
360 360 if gp:
361 361 gitpatches.append(gp)
362 362 dst = m.group(2)
363 363 gp = patchmeta(dst)
364 364 elif gp:
365 365 if line.startswith('--- '):
366 366 gitpatches.append(gp)
367 367 gp = None
368 368 continue
369 369 if line.startswith('rename from '):
370 370 gp.op = 'RENAME'
371 371 gp.oldpath = line[12:]
372 372 elif line.startswith('rename to '):
373 373 gp.path = line[10:]
374 374 elif line.startswith('copy from '):
375 375 gp.op = 'COPY'
376 376 gp.oldpath = line[10:]
377 377 elif line.startswith('copy to '):
378 378 gp.path = line[8:]
379 379 elif line.startswith('deleted file'):
380 380 gp.op = 'DELETE'
381 381 elif line.startswith('new file mode '):
382 382 gp.op = 'ADD'
383 383 gp.setmode(int(line[-6:], 8))
384 384 elif line.startswith('new mode '):
385 385 gp.setmode(int(line[-6:], 8))
386 386 elif line.startswith('GIT binary patch'):
387 387 gp.binary = True
388 388 if gp:
389 389 gitpatches.append(gp)
390 390
391 391 return gitpatches
392 392
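# Sketch (illustrative input, not part of this changeset): readgitpatch()
# turns the git extended headers of each "diff --git" block into a patchmeta.
# It uses the linereader class defined below, resolved at call time.
def _example_readgit():
    text = ('diff --git a/a b/b\n'
            'copy from a\n'
            'copy to b\n'
            'diff --git a/c b/c\n'
            'deleted file mode 100644\n')
    return readgitpatch(linereader(cStringIO.StringIO(text)))
    # expected: [<patchmeta COPY 'b'>, <patchmeta DELETE 'c'>]
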
393 393 class linereader(object):
394 394 # simple class to allow pushing lines back into the input stream
395 395 def __init__(self, fp):
396 396 self.fp = fp
397 397 self.buf = []
398 398
399 399 def push(self, line):
400 400 if line is not None:
401 401 self.buf.append(line)
402 402
403 403 def readline(self):
404 404 if self.buf:
405 405 l = self.buf[0]
406 406 del self.buf[0]
407 407 return l
408 408 return self.fp.readline()
409 409
410 410 def __iter__(self):
411 411 while True:
412 412 l = self.readline()
413 413 if not l:
414 414 break
415 415 yield l
416 416
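# Sketch: linereader.push() lets a caller peek at a line and then un-read it,
# which the parsers below rely on when probing for '---'/'+++' headers.
def _example_peekline(lr):
    line = lr.readline()
    lr.push(line)  # the next readline() returns the same line again
    return line
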
417 417 class abstractbackend(object):
418 418 def __init__(self, ui):
419 419 self.ui = ui
420 420
421 421 def getfile(self, fname):
422 422 """Return target file data and flags as a (data, (islink,
423 423 isexec)) tuple. Data is None if file is missing/deleted.
424 424 """
425 425 raise NotImplementedError
426 426
427 427 def setfile(self, fname, data, mode, copysource):
428 428 """Write data to target file fname and set its mode. mode is a
429 429 (islink, isexec) tuple. If data is None, the file content should
430 430 be left unchanged. If the file is modified after being copied,
431 431 copysource is set to the original file name.
432 432 """
433 433 raise NotImplementedError
434 434
435 435 def unlink(self, fname):
436 436 """Unlink target file."""
437 437 raise NotImplementedError
438 438
439 439 def writerej(self, fname, failed, total, lines):
440 440 """Write rejected lines for fname. total is the number of hunks
441 441 which failed to apply and total the total number of hunks for this
442 442 files.
443 443 """
444 444 pass
445 445
446 446 def exists(self, fname):
447 447 raise NotImplementedError
448 448
449 449 class fsbackend(abstractbackend):
450 450 def __init__(self, ui, basedir):
451 451 super(fsbackend, self).__init__(ui)
452 452 self.opener = scmutil.opener(basedir)
453 453
454 454 def _join(self, f):
455 455 return os.path.join(self.opener.base, f)
456 456
457 457 def getfile(self, fname):
458 458 if self.opener.islink(fname):
459 459 return (self.opener.readlink(fname), (True, False))
460 460
461 461 isexec = False
462 462 try:
463 463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
464 464 except OSError as e:
465 465 if e.errno != errno.ENOENT:
466 466 raise
467 467 try:
468 468 return (self.opener.read(fname), (False, isexec))
469 469 except IOError as e:
470 470 if e.errno != errno.ENOENT:
471 471 raise
472 472 return None, None
473 473
474 474 def setfile(self, fname, data, mode, copysource):
475 475 islink, isexec = mode
476 476 if data is None:
477 477 self.opener.setflags(fname, islink, isexec)
478 478 return
479 479 if islink:
480 480 self.opener.symlink(data, fname)
481 481 else:
482 482 self.opener.write(fname, data)
483 483 if isexec:
484 484 self.opener.setflags(fname, False, True)
485 485
486 486 def unlink(self, fname):
487 487 self.opener.unlinkpath(fname, ignoremissing=True)
488 488
489 489 def writerej(self, fname, failed, total, lines):
490 490 fname = fname + ".rej"
491 491 self.ui.warn(
492 492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
493 493 (failed, total, fname))
494 494 fp = self.opener(fname, 'w')
495 495 fp.writelines(lines)
496 496 fp.close()
497 497
498 498 def exists(self, fname):
499 499 return self.opener.lexists(fname)
500 500
501 501 class workingbackend(fsbackend):
502 502 def __init__(self, ui, repo, similarity):
503 503 super(workingbackend, self).__init__(ui, repo.root)
504 504 self.repo = repo
505 505 self.similarity = similarity
506 506 self.removed = set()
507 507 self.changed = set()
508 508 self.copied = []
509 509
510 510 def _checkknown(self, fname):
511 511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
512 512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
513 513
514 514 def setfile(self, fname, data, mode, copysource):
515 515 self._checkknown(fname)
516 516 super(workingbackend, self).setfile(fname, data, mode, copysource)
517 517 if copysource is not None:
518 518 self.copied.append((copysource, fname))
519 519 self.changed.add(fname)
520 520
521 521 def unlink(self, fname):
522 522 self._checkknown(fname)
523 523 super(workingbackend, self).unlink(fname)
524 524 self.removed.add(fname)
525 525 self.changed.add(fname)
526 526
527 527 def close(self):
528 528 wctx = self.repo[None]
529 529 changed = set(self.changed)
530 530 for src, dst in self.copied:
531 531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
532 532 if self.removed:
533 533 wctx.forget(sorted(self.removed))
534 534 for f in self.removed:
535 535 if f not in self.repo.dirstate:
536 536 # File was deleted and no longer belongs to the
537 537 # dirstate, it was probably marked added then
538 538 # deleted, and should not be considered by
539 539 # marktouched().
540 540 changed.discard(f)
541 541 if changed:
542 542 scmutil.marktouched(self.repo, changed, self.similarity)
543 543 return sorted(self.changed)
544 544
545 545 class filestore(object):
546 546 def __init__(self, maxsize=None):
547 547 self.opener = None
548 548 self.files = {}
549 549 self.created = 0
550 550 self.maxsize = maxsize
551 551 if self.maxsize is None:
552 552 self.maxsize = 4*(2**20)
553 553 self.size = 0
554 554 self.data = {}
555 555
556 556 def setfile(self, fname, data, mode, copied=None):
557 557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
558 558 self.data[fname] = (data, mode, copied)
559 559 self.size += len(data)
560 560 else:
561 561 if self.opener is None:
562 562 root = tempfile.mkdtemp(prefix='hg-patch-')
563 563 self.opener = scmutil.opener(root)
564 564 # Avoid filename issues with these simple names
565 565 fn = str(self.created)
566 566 self.opener.write(fn, data)
567 567 self.created += 1
568 568 self.files[fname] = (fn, mode, copied)
569 569
570 570 def getfile(self, fname):
571 571 if fname in self.data:
572 572 return self.data[fname]
573 573 if not self.opener or fname not in self.files:
574 574 return None, None, None
575 575 fn, mode, copied = self.files[fname]
576 576 return self.opener.read(fn), mode, copied
577 577
578 578 def close(self):
579 579 if self.opener:
580 580 shutil.rmtree(self.opener.base)
581 581
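# Sketch of the overflow behaviour above (hypothetical sizes): with a small
# maxsize the second file is spilled to a temporary directory on disk instead
# of being kept in the in-memory dict.
def _example_filestore():
    store = filestore(maxsize=16)
    store.setfile('small', 'data', (False, False))
    store.setfile('large', 'x' * 64, (False, False))  # exceeds maxsize
    try:
        return store.getfile('large')[0] == 'x' * 64
    finally:
        store.close()
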
582 582 class repobackend(abstractbackend):
583 583 def __init__(self, ui, repo, ctx, store):
584 584 super(repobackend, self).__init__(ui)
585 585 self.repo = repo
586 586 self.ctx = ctx
587 587 self.store = store
588 588 self.changed = set()
589 589 self.removed = set()
590 590 self.copied = {}
591 591
592 592 def _checkknown(self, fname):
593 593 if fname not in self.ctx:
594 594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
595 595
596 596 def getfile(self, fname):
597 597 try:
598 598 fctx = self.ctx[fname]
599 599 except error.LookupError:
600 600 return None, None
601 601 flags = fctx.flags()
602 602 return fctx.data(), ('l' in flags, 'x' in flags)
603 603
604 604 def setfile(self, fname, data, mode, copysource):
605 605 if copysource:
606 606 self._checkknown(copysource)
607 607 if data is None:
608 608 data = self.ctx[fname].data()
609 609 self.store.setfile(fname, data, mode, copysource)
610 610 self.changed.add(fname)
611 611 if copysource:
612 612 self.copied[fname] = copysource
613 613
614 614 def unlink(self, fname):
615 615 self._checkknown(fname)
616 616 self.removed.add(fname)
617 617
618 618 def exists(self, fname):
619 619 return fname in self.ctx
620 620
621 621 def close(self):
622 622 return self.changed | self.removed
623 623
624 624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
625 625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
626 626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
627 627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
628 628
629 629 class patchfile(object):
630 630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
631 631 self.fname = gp.path
632 632 self.eolmode = eolmode
633 633 self.eol = None
634 634 self.backend = backend
635 635 self.ui = ui
636 636 self.lines = []
637 637 self.exists = False
638 638 self.missing = True
639 639 self.mode = gp.mode
640 640 self.copysource = gp.oldpath
641 641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
642 642 self.remove = gp.op == 'DELETE'
643 643 if self.copysource is None:
644 644 data, mode = backend.getfile(self.fname)
645 645 else:
646 646 data, mode = store.getfile(self.copysource)[:2]
647 647 if data is not None:
648 648 self.exists = self.copysource is None or backend.exists(self.fname)
649 649 self.missing = False
650 650 if data:
651 651 self.lines = mdiff.splitnewlines(data)
652 652 if self.mode is None:
653 653 self.mode = mode
654 654 if self.lines:
655 655 # Normalize line endings
656 656 if self.lines[0].endswith('\r\n'):
657 657 self.eol = '\r\n'
658 658 elif self.lines[0].endswith('\n'):
659 659 self.eol = '\n'
660 660 if eolmode != 'strict':
661 661 nlines = []
662 662 for l in self.lines:
663 663 if l.endswith('\r\n'):
664 664 l = l[:-2] + '\n'
665 665 nlines.append(l)
666 666 self.lines = nlines
667 667 else:
668 668 if self.create:
669 669 self.missing = False
670 670 if self.mode is None:
671 671 self.mode = (False, False)
672 672 if self.missing:
673 673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
674 674
675 675 self.hash = {}
676 676 self.dirty = 0
677 677 self.offset = 0
678 678 self.skew = 0
679 679 self.rej = []
680 680 self.fileprinted = False
681 681 self.printfile(False)
682 682 self.hunks = 0
683 683
684 684 def writelines(self, fname, lines, mode):
685 685 if self.eolmode == 'auto':
686 686 eol = self.eol
687 687 elif self.eolmode == 'crlf':
688 688 eol = '\r\n'
689 689 else:
690 690 eol = '\n'
691 691
692 692 if self.eolmode != 'strict' and eol and eol != '\n':
693 693 rawlines = []
694 694 for l in lines:
695 695 if l and l[-1] == '\n':
696 696 l = l[:-1] + eol
697 697 rawlines.append(l)
698 698 lines = rawlines
699 699
700 700 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
701 701
702 702 def printfile(self, warn):
703 703 if self.fileprinted:
704 704 return
705 705 if warn or self.ui.verbose:
706 706 self.fileprinted = True
707 707 s = _("patching file %s\n") % self.fname
708 708 if warn:
709 709 self.ui.warn(s)
710 710 else:
711 711 self.ui.note(s)
712 712
713 713
714 714 def findlines(self, l, linenum):
715 715 # looks through the hash and finds candidate lines. The
716 716 # result is a list of line numbers sorted based on distance
717 717 # from linenum
718 718
719 719 cand = self.hash.get(l, [])
720 720 if len(cand) > 1:
721 721 # resort our list of potentials forward then back.
722 722 cand.sort(key=lambda x: abs(x - linenum))
723 723 return cand
724 724
725 725 def write_rej(self):
726 726 # our rejects are a little different from patch(1). This always
727 727 # creates rejects in the same form as the original patch. A file
728 728 # header is inserted so that you can run the reject through patch again
729 729 # without having to type the filename.
730 730 if not self.rej:
731 731 return
732 732 base = os.path.basename(self.fname)
733 733 lines = ["--- %s\n+++ %s\n" % (base, base)]
734 734 for x in self.rej:
735 735 for l in x.hunk:
736 736 lines.append(l)
737 737 if l[-1] != '\n':
738 738 lines.append("\n\ No newline at end of file\n")
739 739 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
740 740
741 741 def apply(self, h):
742 742 if not h.complete():
743 743 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
744 744 (h.number, h.desc, len(h.a), h.lena, len(h.b),
745 745 h.lenb))
746 746
747 747 self.hunks += 1
748 748
749 749 if self.missing:
750 750 self.rej.append(h)
751 751 return -1
752 752
753 753 if self.exists and self.create:
754 754 if self.copysource:
755 755 self.ui.warn(_("cannot create %s: destination already "
756 756 "exists\n") % self.fname)
757 757 else:
758 758 self.ui.warn(_("file %s already exists\n") % self.fname)
759 759 self.rej.append(h)
760 760 return -1
761 761
762 762 if isinstance(h, binhunk):
763 763 if self.remove:
764 764 self.backend.unlink(self.fname)
765 765 else:
766 766 l = h.new(self.lines)
767 767 self.lines[:] = l
768 768 self.offset += len(l)
769 769 self.dirty = True
770 770 return 0
771 771
772 772 horig = h
773 773 if (self.eolmode in ('crlf', 'lf')
774 774 or self.eolmode == 'auto' and self.eol):
775 775 # If new eols are going to be normalized, then normalize
776 776 # hunk data before patching. Otherwise, preserve input
777 777 # line-endings.
778 778 h = h.getnormalized()
779 779
780 780 # fast case first, no offsets, no fuzz
781 781 old, oldstart, new, newstart = h.fuzzit(0, False)
782 782 oldstart += self.offset
783 783 orig_start = oldstart
784 784 # if there's skew we want to emit the "(offset %d lines)" even
785 785 # when the hunk cleanly applies at start + skew, so skip the
786 786 # fast case code
787 787 if (self.skew == 0 and
788 788 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
789 789 if self.remove:
790 790 self.backend.unlink(self.fname)
791 791 else:
792 792 self.lines[oldstart:oldstart + len(old)] = new
793 793 self.offset += len(new) - len(old)
794 794 self.dirty = True
795 795 return 0
796 796
797 797 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
798 798 self.hash = {}
799 799 for x, s in enumerate(self.lines):
800 800 self.hash.setdefault(s, []).append(x)
801 801
802 802 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
803 803 for toponly in [True, False]:
804 804 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
805 805 oldstart = oldstart + self.offset + self.skew
806 806 oldstart = min(oldstart, len(self.lines))
807 807 if old:
808 808 cand = self.findlines(old[0][1:], oldstart)
809 809 else:
810 810 # Only adding lines with no or fuzzed context, just
811 811 # take the skew into account
812 812 cand = [oldstart]
813 813
814 814 for l in cand:
815 815 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
816 816 self.lines[l : l + len(old)] = new
817 817 self.offset += len(new) - len(old)
818 818 self.skew = l - orig_start
819 819 self.dirty = True
820 820 offset = l - orig_start - fuzzlen
821 821 if fuzzlen:
822 822 msg = _("Hunk #%d succeeded at %d "
823 823 "with fuzz %d "
824 824 "(offset %d lines).\n")
825 825 self.printfile(True)
826 826 self.ui.warn(msg %
827 827 (h.number, l + 1, fuzzlen, offset))
828 828 else:
829 829 msg = _("Hunk #%d succeeded at %d "
830 830 "(offset %d lines).\n")
831 831 self.ui.note(msg % (h.number, l + 1, offset))
832 832 return fuzzlen
833 833 self.printfile(True)
834 834 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
835 835 self.rej.append(horig)
836 836 return -1
837 837
838 838 def close(self):
839 839 if self.dirty:
840 840 self.writelines(self.fname, self.lines, self.mode)
841 841 self.write_rej()
842 842 return len(self.rej)
843 843
844 844 class header(object):
845 845 """patch header
846 846 """
847 847 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
848 848 diff_re = re.compile('diff -r .* (.*)$')
849 849 allhunks_re = re.compile('(?:index|deleted file) ')
850 850 pretty_re = re.compile('(?:new file|deleted file) ')
851 851 special_re = re.compile('(?:index|deleted|copy|rename) ')
852 852 newfile_re = re.compile('(?:new file)')
853 853
854 854 def __init__(self, header):
855 855 self.header = header
856 856 self.hunks = []
857 857
858 858 def binary(self):
859 859 return any(h.startswith('index ') for h in self.header)
860 860
861 861 def pretty(self, fp):
862 862 for h in self.header:
863 863 if h.startswith('index '):
864 864 fp.write(_('this modifies a binary file (all or nothing)\n'))
865 865 break
866 866 if self.pretty_re.match(h):
867 867 fp.write(h)
868 868 if self.binary():
869 869 fp.write(_('this is a binary file\n'))
870 870 break
871 871 if h.startswith('---'):
872 872 fp.write(_('%d hunks, %d lines changed\n') %
873 873 (len(self.hunks),
874 874 sum([max(h.added, h.removed) for h in self.hunks])))
875 875 break
876 876 fp.write(h)
877 877
878 878 def write(self, fp):
879 879 fp.write(''.join(self.header))
880 880
881 881 def allhunks(self):
882 882 return any(self.allhunks_re.match(h) for h in self.header)
883 883
884 884 def files(self):
885 885 match = self.diffgit_re.match(self.header[0])
886 886 if match:
887 887 fromfile, tofile = match.groups()
888 888 if fromfile == tofile:
889 889 return [fromfile]
890 890 return [fromfile, tofile]
891 891 else:
892 892 return self.diff_re.match(self.header[0]).groups()
893 893
894 894 def filename(self):
895 895 return self.files()[-1]
896 896
897 897 def __repr__(self):
898 898 return '<header %s>' % (' '.join(map(repr, self.files())))
899 899
900 900 def isnewfile(self):
901 901 return any(self.newfile_re.match(h) for h in self.header)
902 902
903 903 def special(self):
904 904 # Special files are shown only at the header level and not at the hunk
905 905 # level. For example, a file that has been deleted is a special file:
906 906 # the user cannot change the content of the operation; in the case of
907 907 # a deleted file they have to take the deletion or leave it, they
908 908 # cannot take only part of it.
909 909 # Newly added files are special only if they are empty; they are not
910 910 # special once they have content, as we want to be able to change it.
911 911 nocontent = len(self.header) == 2
912 912 emptynewfile = self.isnewfile() and nocontent
913 913 return emptynewfile or \
914 914 any(self.special_re.match(h) for h in self.header)
915 915
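# Sketch: files() as parsed by the regexes above; for a git header both the
# source and destination are reported unless they are identical.
def _example_headerfiles():
    h = header(['diff --git a/old.c b/new.c\n'])
    return h.files()
    # expected: ['old.c', 'new.c']
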
916 916 class recordhunk(object):
917 917 """patch hunk
918 918
919 919 XXX shouldn't we merge this with the other hunk class?
920 920 """
921 921 maxcontext = 3
922 922
923 923 def __init__(self, header, fromline, toline, proc, before, hunk, after):
924 924 def trimcontext(number, lines):
925 925 delta = len(lines) - self.maxcontext
926 926 if False and delta > 0:
927 927 return number + delta, lines[:self.maxcontext]
928 928 return number, lines
929 929
930 930 self.header = header
931 931 self.fromline, self.before = trimcontext(fromline, before)
932 932 self.toline, self.after = trimcontext(toline, after)
933 933 self.proc = proc
934 934 self.hunk = hunk
935 935 self.added, self.removed = self.countchanges(self.hunk)
936 936
937 937 def __eq__(self, v):
938 938 if not isinstance(v, recordhunk):
939 939 return False
940 940
941 941 return ((v.hunk == self.hunk) and
942 942 (v.proc == self.proc) and
943 943 (self.fromline == v.fromline) and
944 944 (self.header.files() == v.header.files()))
945 945
946 946 def __hash__(self):
947 947 return hash((tuple(self.hunk),
948 948 tuple(self.header.files()),
949 949 self.fromline,
950 950 self.proc))
951 951
952 952 def countchanges(self, hunk):
953 953 """hunk -> (n+,n-)"""
954 954 add = len([h for h in hunk if h[0] == '+'])
955 955 rem = len([h for h in hunk if h[0] == '-'])
956 956 return add, rem
957 957
958 958 def write(self, fp):
959 959 delta = len(self.before) + len(self.after)
960 960 if self.after and self.after[-1] == '\\ No newline at end of file\n':
961 961 delta -= 1
962 962 fromlen = delta + self.removed
963 963 tolen = delta + self.added
964 964 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
965 965 (self.fromline, fromlen, self.toline, tolen,
966 966 self.proc and (' ' + self.proc)))
967 967 fp.write(''.join(self.before + self.hunk + self.after))
968 968
969 969 pretty = write
970 970
971 971 def filename(self):
972 972 return self.header.filename()
973 973
974 974 def __repr__(self):
975 975 return '<hunk %r@%d>' % (self.filename(), self.fromline)
976 976
977 977 def filterpatch(ui, headers, operation=None):
978 978 """Interactively filter patch chunks into applied-only chunks"""
979 979 if operation is None:
980 980 operation = _('record')
981 981
982 982 def prompt(skipfile, skipall, query, chunk):
983 983 """prompt query, and process base inputs
984 984
985 985 - y/n for the rest of file
986 986 - y/n for the rest
987 987 - ? (help)
988 988 - q (quit)
989 989
990 990 Return True/False and possibly updated skipfile and skipall.
991 991 """
992 992 newpatches = None
993 993 if skipall is not None:
994 994 return skipall, skipfile, skipall, newpatches
995 995 if skipfile is not None:
996 996 return skipfile, skipfile, skipall, newpatches
997 997 while True:
998 998 resps = _('[Ynesfdaq?]'
999 999 '$$ &Yes, record this change'
1000 1000 '$$ &No, skip this change'
1001 1001 '$$ &Edit this change manually'
1002 1002 '$$ &Skip remaining changes to this file'
1003 1003 '$$ Record remaining changes to this &file'
1004 1004 '$$ &Done, skip remaining changes and files'
1005 1005 '$$ Record &all changes to all remaining files'
1006 1006 '$$ &Quit, recording no changes'
1007 1007 '$$ &? (display help)')
1008 1008 r = ui.promptchoice("%s %s" % (query, resps))
1009 1009 ui.write("\n")
1010 1010 if r == 8: # ?
1011 1011 for c, t in ui.extractchoices(resps)[1]:
1012 1012 ui.write('%s - %s\n' % (c, t.lower()))
1013 1013 continue
1014 1014 elif r == 0: # yes
1015 1015 ret = True
1016 1016 elif r == 1: # no
1017 1017 ret = False
1018 1018 elif r == 2: # Edit patch
1019 1019 if chunk is None:
1020 1020 ui.write(_('cannot edit patch for whole file'))
1021 1021 ui.write("\n")
1022 1022 continue
1023 1023 if chunk.header.binary():
1024 1024 ui.write(_('cannot edit patch for binary file'))
1025 1025 ui.write("\n")
1026 1026 continue
1027 1027 # Patch comment based on the Git one (based on comment at end of
1028 1028 # https://mercurial-scm.org/wiki/RecordExtension)
1029 1029 phelp = '---' + _("""
1030 1030 To remove '-' lines, make them ' ' lines (context).
1031 1031 To remove '+' lines, delete them.
1032 1032 Lines starting with # will be removed from the patch.
1033 1033
1034 1034 If the patch applies cleanly, the edited hunk will immediately be
1035 1035 added to the record list. If it does not apply cleanly, a rejects
1036 1036 file will be generated: you can use that when you try again. If
1037 1037 all lines of the hunk are removed, then the edit is aborted and
1038 1038 the hunk is left unchanged.
1039 1039 """)
1040 1040 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1041 1041 suffix=".diff", text=True)
1042 1042 ncpatchfp = None
1043 1043 try:
1044 1044 # Write the initial patch
1045 1045 f = os.fdopen(patchfd, "w")
1046 1046 chunk.header.write(f)
1047 1047 chunk.write(f)
1048 1048 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1049 1049 f.close()
1050 1050 # Start the editor and wait for it to complete
1051 1051 editor = ui.geteditor()
1052 1052 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1053 1053 environ={'HGUSER': ui.username()})
1054 1054 if ret != 0:
1055 1055 ui.warn(_("editor exited with exit code %d\n") % ret)
1056 1056 continue
1057 1057 # Remove comment lines
1058 1058 patchfp = open(patchfn)
1059 1059 ncpatchfp = cStringIO.StringIO()
1060 1060 for line in patchfp:
1061 1061 if not line.startswith('#'):
1062 1062 ncpatchfp.write(line)
1063 1063 patchfp.close()
1064 1064 ncpatchfp.seek(0)
1065 1065 newpatches = parsepatch(ncpatchfp)
1066 1066 finally:
1067 1067 os.unlink(patchfn)
1068 1068 del ncpatchfp
1069 1069 # Signal that the chunk shouldn't be applied as-is, but
1070 1070 # provide the new patch to be used instead.
1071 1071 ret = False
1072 1072 elif r == 3: # Skip
1073 1073 ret = skipfile = False
1074 1074 elif r == 4: # file (Record remaining)
1075 1075 ret = skipfile = True
1076 1076 elif r == 5: # done, skip remaining
1077 1077 ret = skipall = False
1078 1078 elif r == 6: # all
1079 1079 ret = skipall = True
1080 1080 elif r == 7: # quit
1081 1081 raise error.Abort(_('user quit'))
1082 1082 return ret, skipfile, skipall, newpatches
1083 1083
1084 1084 seen = set()
1085 1085 applied = {} # 'filename' -> [] of chunks
1086 1086 skipfile, skipall = None, None
1087 1087 pos, total = 1, sum(len(h.hunks) for h in headers)
1088 1088 for h in headers:
1089 1089 pos += len(h.hunks)
1090 1090 skipfile = None
1091 1091 fixoffset = 0
1092 1092 hdr = ''.join(h.header)
1093 1093 if hdr in seen:
1094 1094 continue
1095 1095 seen.add(hdr)
1096 1096 if skipall is None:
1097 1097 h.pretty(ui)
1098 1098 msg = (_('examine changes to %s?') %
1099 1099 _(' and ').join("'%s'" % f for f in h.files()))
1100 1100 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1101 1101 if not r:
1102 1102 continue
1103 1103 applied[h.filename()] = [h]
1104 1104 if h.allhunks():
1105 1105 applied[h.filename()] += h.hunks
1106 1106 continue
1107 1107 for i, chunk in enumerate(h.hunks):
1108 1108 if skipfile is None and skipall is None:
1109 1109 chunk.pretty(ui)
1110 1110 if total == 1:
1111 1111 msg = _("record this change to '%s'?") % chunk.filename()
1112 1112 else:
1113 1113 idx = pos - len(h.hunks) + i
1114 1114 msg = _("record change %d/%d to '%s'?") % (idx, total,
1115 1115 chunk.filename())
1116 1116 r, skipfile, skipall, newpatches = prompt(skipfile,
1117 1117 skipall, msg, chunk)
1118 1118 if r:
1119 1119 if fixoffset:
1120 1120 chunk = copy.copy(chunk)
1121 1121 chunk.toline += fixoffset
1122 1122 applied[chunk.filename()].append(chunk)
1123 1123 elif newpatches is not None:
1124 1124 for newpatch in newpatches:
1125 1125 for newhunk in newpatch.hunks:
1126 1126 if fixoffset:
1127 1127 newhunk.toline += fixoffset
1128 1128 applied[newhunk.filename()].append(newhunk)
1129 1129 else:
1130 1130 fixoffset += chunk.removed - chunk.added
1131 1131 return (sum([h for h in applied.itervalues()
1132 1132 if h[0].special() or len(h) > 1], []), {})
1133 1133 class hunk(object):
1134 1134 def __init__(self, desc, num, lr, context):
1135 1135 self.number = num
1136 1136 self.desc = desc
1137 1137 self.hunk = [desc]
1138 1138 self.a = []
1139 1139 self.b = []
1140 1140 self.starta = self.lena = None
1141 1141 self.startb = self.lenb = None
1142 1142 if lr is not None:
1143 1143 if context:
1144 1144 self.read_context_hunk(lr)
1145 1145 else:
1146 1146 self.read_unified_hunk(lr)
1147 1147
1148 1148 def getnormalized(self):
1149 1149 """Return a copy with line endings normalized to LF."""
1150 1150
1151 1151 def normalize(lines):
1152 1152 nlines = []
1153 1153 for line in lines:
1154 1154 if line.endswith('\r\n'):
1155 1155 line = line[:-2] + '\n'
1156 1156 nlines.append(line)
1157 1157 return nlines
1158 1158
1159 1159 # Dummy object, it is rebuilt manually
1160 1160 nh = hunk(self.desc, self.number, None, None)
1161 1161 nh.number = self.number
1162 1162 nh.desc = self.desc
1163 1163 nh.hunk = self.hunk
1164 1164 nh.a = normalize(self.a)
1165 1165 nh.b = normalize(self.b)
1166 1166 nh.starta = self.starta
1167 1167 nh.startb = self.startb
1168 1168 nh.lena = self.lena
1169 1169 nh.lenb = self.lenb
1170 1170 return nh
1171 1171
1172 1172 def read_unified_hunk(self, lr):
1173 1173 m = unidesc.match(self.desc)
1174 1174 if not m:
1175 1175 raise PatchError(_("bad hunk #%d") % self.number)
1176 1176 self.starta, self.lena, self.startb, self.lenb = m.groups()
1177 1177 if self.lena is None:
1178 1178 self.lena = 1
1179 1179 else:
1180 1180 self.lena = int(self.lena)
1181 1181 if self.lenb is None:
1182 1182 self.lenb = 1
1183 1183 else:
1184 1184 self.lenb = int(self.lenb)
1185 1185 self.starta = int(self.starta)
1186 1186 self.startb = int(self.startb)
1187 1187 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1188 1188 self.b)
1189 1189 # if we hit eof before finishing out the hunk, the last line will
1190 1190 # be zero length. Let's try to fix it up.
1191 1191 while len(self.hunk[-1]) == 0:
1192 1192 del self.hunk[-1]
1193 1193 del self.a[-1]
1194 1194 del self.b[-1]
1195 1195 self.lena -= 1
1196 1196 self.lenb -= 1
1197 1197 self._fixnewline(lr)
1198 1198
1199 1199 def read_context_hunk(self, lr):
1200 1200 self.desc = lr.readline()
1201 1201 m = contextdesc.match(self.desc)
1202 1202 if not m:
1203 1203 raise PatchError(_("bad hunk #%d") % self.number)
1204 1204 self.starta, aend = m.groups()
1205 1205 self.starta = int(self.starta)
1206 1206 if aend is None:
1207 1207 aend = self.starta
1208 1208 self.lena = int(aend) - self.starta
1209 1209 if self.starta:
1210 1210 self.lena += 1
1211 1211 for x in xrange(self.lena):
1212 1212 l = lr.readline()
1213 1213 if l.startswith('---'):
1214 1214 # lines addition, old block is empty
1215 1215 lr.push(l)
1216 1216 break
1217 1217 s = l[2:]
1218 1218 if l.startswith('- ') or l.startswith('! '):
1219 1219 u = '-' + s
1220 1220 elif l.startswith(' '):
1221 1221 u = ' ' + s
1222 1222 else:
1223 1223 raise PatchError(_("bad hunk #%d old text line %d") %
1224 1224 (self.number, x))
1225 1225 self.a.append(u)
1226 1226 self.hunk.append(u)
1227 1227
1228 1228 l = lr.readline()
1229 1229 if l.startswith('\ '):
1230 1230 s = self.a[-1][:-1]
1231 1231 self.a[-1] = s
1232 1232 self.hunk[-1] = s
1233 1233 l = lr.readline()
1234 1234 m = contextdesc.match(l)
1235 1235 if not m:
1236 1236 raise PatchError(_("bad hunk #%d") % self.number)
1237 1237 self.startb, bend = m.groups()
1238 1238 self.startb = int(self.startb)
1239 1239 if bend is None:
1240 1240 bend = self.startb
1241 1241 self.lenb = int(bend) - self.startb
1242 1242 if self.startb:
1243 1243 self.lenb += 1
1244 1244 hunki = 1
1245 1245 for x in xrange(self.lenb):
1246 1246 l = lr.readline()
1247 1247 if l.startswith('\ '):
1248 1248 # XXX: the only way to hit this is with an invalid line range.
1249 1249 # The no-eol marker is not counted in the line range, but I
1250 1250 # guess there are diff(1) implementations out there which behave differently.
1251 1251 s = self.b[-1][:-1]
1252 1252 self.b[-1] = s
1253 1253 self.hunk[hunki - 1] = s
1254 1254 continue
1255 1255 if not l:
1256 1256 # line deletions, new block is empty and we hit EOF
1257 1257 lr.push(l)
1258 1258 break
1259 1259 s = l[2:]
1260 1260 if l.startswith('+ ') or l.startswith('! '):
1261 1261 u = '+' + s
1262 1262 elif l.startswith(' '):
1263 1263 u = ' ' + s
1264 1264 elif len(self.b) == 0:
1265 1265 # line deletions, new block is empty
1266 1266 lr.push(l)
1267 1267 break
1268 1268 else:
1269 1269 raise PatchError(_("bad hunk #%d old text line %d") %
1270 1270 (self.number, x))
1271 1271 self.b.append(s)
1272 1272 while True:
1273 1273 if hunki >= len(self.hunk):
1274 1274 h = ""
1275 1275 else:
1276 1276 h = self.hunk[hunki]
1277 1277 hunki += 1
1278 1278 if h == u:
1279 1279 break
1280 1280 elif h.startswith('-'):
1281 1281 continue
1282 1282 else:
1283 1283 self.hunk.insert(hunki - 1, u)
1284 1284 break
1285 1285
1286 1286 if not self.a:
1287 1287 # this happens when lines were only added to the hunk
1288 1288 for x in self.hunk:
1289 1289 if x.startswith('-') or x.startswith(' '):
1290 1290 self.a.append(x)
1291 1291 if not self.b:
1292 1292 # this happens when lines were only deleted from the hunk
1293 1293 for x in self.hunk:
1294 1294 if x.startswith('+') or x.startswith(' '):
1295 1295 self.b.append(x[1:])
1296 1296 # @@ -start,len +start,len @@
1297 1297 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1298 1298 self.startb, self.lenb)
1299 1299 self.hunk[0] = self.desc
1300 1300 self._fixnewline(lr)
1301 1301
1302 1302 def _fixnewline(self, lr):
1303 1303 l = lr.readline()
1304 1304 if l.startswith('\ '):
1305 1305 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1306 1306 else:
1307 1307 lr.push(l)
1308 1308
1309 1309 def complete(self):
1310 1310 return len(self.a) == self.lena and len(self.b) == self.lenb
1311 1311
1312 1312 def _fuzzit(self, old, new, fuzz, toponly):
1313 1313 # this removes context lines from the top and bottom of list 'l'. It
1314 1314 # checks the hunk to make sure only context lines are removed, and then
1315 1315 # returns a new shortened list of lines.
1316 1316 fuzz = min(fuzz, len(old))
1317 1317 if fuzz:
1318 1318 top = 0
1319 1319 bot = 0
1320 1320 hlen = len(self.hunk)
1321 1321 for x in xrange(hlen - 1):
1322 1322 # the hunk starts with the @@ line, so use x+1
1323 1323 if self.hunk[x + 1][0] == ' ':
1324 1324 top += 1
1325 1325 else:
1326 1326 break
1327 1327 if not toponly:
1328 1328 for x in xrange(hlen - 1):
1329 1329 if self.hunk[hlen - bot - 1][0] == ' ':
1330 1330 bot += 1
1331 1331 else:
1332 1332 break
1333 1333
1334 1334 bot = min(fuzz, bot)
1335 1335 top = min(fuzz, top)
1336 1336 return old[top:len(old) - bot], new[top:len(new) - bot], top
1337 1337 return old, new, 0
1338 1338
1339 1339 def fuzzit(self, fuzz, toponly):
1340 1340 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1341 1341 oldstart = self.starta + top
1342 1342 newstart = self.startb + top
1343 1343 # zero length hunk ranges already have their start decremented
1344 1344 if self.lena and oldstart > 0:
1345 1345 oldstart -= 1
1346 1346 if self.lenb and newstart > 0:
1347 1347 newstart -= 1
1348 1348 return old, oldstart, new, newstart
1349 1349
1350 1350 class binhunk(object):
1351 1351 'A binary patch file.'
1352 1352 def __init__(self, lr, fname):
1353 1353 self.text = None
1354 1354 self.delta = False
1355 1355 self.hunk = ['GIT binary patch\n']
1356 1356 self._fname = fname
1357 1357 self._read(lr)
1358 1358
1359 1359 def complete(self):
1360 1360 return self.text is not None
1361 1361
1362 1362 def new(self, lines):
1363 1363 if self.delta:
1364 1364 return [applybindelta(self.text, ''.join(lines))]
1365 1365 return [self.text]
1366 1366
1367 1367 def _read(self, lr):
1368 1368 def getline(lr, hunk):
1369 1369 l = lr.readline()
1370 1370 hunk.append(l)
1371 1371 return l.rstrip('\r\n')
1372 1372
1373 1373 size = 0
1374 1374 while True:
1375 1375 line = getline(lr, self.hunk)
1376 1376 if not line:
1377 1377 raise PatchError(_('could not extract "%s" binary data')
1378 1378 % self._fname)
1379 1379 if line.startswith('literal '):
1380 1380 size = int(line[8:].rstrip())
1381 1381 break
1382 1382 if line.startswith('delta '):
1383 1383 size = int(line[6:].rstrip())
1384 1384 self.delta = True
1385 1385 break
1386 1386 dec = []
1387 1387 line = getline(lr, self.hunk)
1388 1388 while len(line) > 1:
1389 1389 l = line[0]
1390 1390 if l <= 'Z' and l >= 'A':
1391 1391 l = ord(l) - ord('A') + 1
1392 1392 else:
1393 1393 l = ord(l) - ord('a') + 27
1394 1394 try:
1395 1395 dec.append(base85.b85decode(line[1:])[:l])
1396 1396 except ValueError as e:
1397 1397 raise PatchError(_('could not decode "%s" binary patch: %s')
1398 1398 % (self._fname, str(e)))
1399 1399 line = getline(lr, self.hunk)
1400 1400 text = zlib.decompress(''.join(dec))
1401 1401 if len(text) != size:
1402 1402 raise PatchError(_('"%s" length is %d bytes, should be %d')
1403 1403 % (self._fname, len(text), size))
1404 1404 self.text = text
1405 1405
1406 1406 def parsefilename(str):
1407 1407 # --- filename \t|space stuff
1408 1408 s = str[4:].rstrip('\r\n')
1409 1409 i = s.find('\t')
1410 1410 if i < 0:
1411 1411 i = s.find(' ')
1412 1412 if i < 0:
1413 1413 return s
1414 1414 return s[:i]
1415 1415
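# Sketch: parsefilename() keeps the path up to the first tab (or, failing
# that, the first space) after the 4-character '--- '/'+++ ' prefix.
def _example_parsefilename():
    return parsefilename('--- a/foo.c\t2013-01-01 00:00:00\n')
    # expected: 'a/foo.c'
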
1416 1416 def reversehunks(hunks):
1417 1417 '''reverse the signs in the hunks given as argument
1418 1418
1419 1419 This function operates on hunks coming out of patch.filterpatch, that is
1420 1420 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1421 1421
1422 1422 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1423 1423 ... --- a/folder1/g
1424 1424 ... +++ b/folder1/g
1425 1425 ... @@ -1,7 +1,7 @@
1426 1426 ... +firstline
1427 1427 ... c
1428 1428 ... 1
1429 1429 ... 2
1430 1430 ... + 3
1431 1431 ... -4
1432 1432 ... 5
1433 1433 ... d
1434 1434 ... +lastline"""
1435 1435 >>> hunks = parsepatch(rawpatch)
1436 1436 >>> hunkscomingfromfilterpatch = []
1437 1437 >>> for h in hunks:
1438 1438 ... hunkscomingfromfilterpatch.append(h)
1439 1439 ... hunkscomingfromfilterpatch.extend(h.hunks)
1440 1440
1441 1441 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1442 1442 >>> fp = cStringIO.StringIO()
1443 1443 >>> for c in reversedhunks:
1444 1444 ... c.write(fp)
1445 1445 >>> fp.seek(0)
1446 1446 >>> reversedpatch = fp.read()
1447 1447 >>> print reversedpatch
1448 1448 diff --git a/folder1/g b/folder1/g
1449 1449 --- a/folder1/g
1450 1450 +++ b/folder1/g
1451 1451 @@ -1,4 +1,3 @@
1452 1452 -firstline
1453 1453 c
1454 1454 1
1455 1455 2
1456 1456 @@ -1,6 +2,6 @@
1457 1457 c
1458 1458 1
1459 1459 2
1460 1460 - 3
1461 1461 +4
1462 1462 5
1463 1463 d
1464 1464 @@ -5,3 +6,2 @@
1465 1465 5
1466 1466 d
1467 1467 -lastline
1468 1468
1469 1469 '''
1470 1470
1471 1471 from . import crecord as crecordmod
1472 1472 newhunks = []
1473 1473 for c in hunks:
1474 1474 if isinstance(c, crecordmod.uihunk):
1475 1475 # curses hunks encapsulate the record hunk in _hunk
1476 1476 c = c._hunk
1477 1477 if isinstance(c, recordhunk):
1478 1478 for j, line in enumerate(c.hunk):
1479 1479 if line.startswith("-"):
1480 1480 c.hunk[j] = "+" + c.hunk[j][1:]
1481 1481 elif line.startswith("+"):
1482 1482 c.hunk[j] = "-" + c.hunk[j][1:]
1483 1483 c.added, c.removed = c.removed, c.added
1484 1484 newhunks.append(c)
1485 1485 return newhunks
1486 1486
1487 1487 def parsepatch(originalchunks):
1488 1488 """patch -> [] of headers -> [] of hunks """
1489 1489 class parser(object):
1490 1490 """patch parsing state machine"""
1491 1491 def __init__(self):
1492 1492 self.fromline = 0
1493 1493 self.toline = 0
1494 1494 self.proc = ''
1495 1495 self.header = None
1496 1496 self.context = []
1497 1497 self.before = []
1498 1498 self.hunk = []
1499 1499 self.headers = []
1500 1500
1501 1501 def addrange(self, limits):
1502 1502 fromstart, fromend, tostart, toend, proc = limits
1503 1503 self.fromline = int(fromstart)
1504 1504 self.toline = int(tostart)
1505 1505 self.proc = proc
1506 1506
1507 1507 def addcontext(self, context):
1508 1508 if self.hunk:
1509 1509 h = recordhunk(self.header, self.fromline, self.toline,
1510 1510 self.proc, self.before, self.hunk, context)
1511 1511 self.header.hunks.append(h)
1512 1512 self.fromline += len(self.before) + h.removed
1513 1513 self.toline += len(self.before) + h.added
1514 1514 self.before = []
1515 1515 self.hunk = []
1516 1516 self.context = context
1517 1517
1518 1518 def addhunk(self, hunk):
1519 1519 if self.context:
1520 1520 self.before = self.context
1521 1521 self.context = []
1522 1522 self.hunk = hunk
1523 1523
1524 1524 def newfile(self, hdr):
1525 1525 self.addcontext([])
1526 1526 h = header(hdr)
1527 1527 self.headers.append(h)
1528 1528 self.header = h
1529 1529
1530 1530 def addother(self, line):
1531 1531 pass # 'other' lines are ignored
1532 1532
1533 1533 def finished(self):
1534 1534 self.addcontext([])
1535 1535 return self.headers
1536 1536
1537 1537 transitions = {
1538 1538 'file': {'context': addcontext,
1539 1539 'file': newfile,
1540 1540 'hunk': addhunk,
1541 1541 'range': addrange},
1542 1542 'context': {'file': newfile,
1543 1543 'hunk': addhunk,
1544 1544 'range': addrange,
1545 1545 'other': addother},
1546 1546 'hunk': {'context': addcontext,
1547 1547 'file': newfile,
1548 1548 'range': addrange},
1549 1549 'range': {'context': addcontext,
1550 1550 'hunk': addhunk},
1551 1551 'other': {'other': addother},
1552 1552 }
1553 1553
1554 1554 p = parser()
1555 1555 fp = cStringIO.StringIO()
1556 1556 fp.write(''.join(originalchunks))
1557 1557 fp.seek(0)
1558 1558
1559 1559 state = 'context'
1560 1560 for newstate, data in scanpatch(fp):
1561 1561 try:
1562 1562 p.transitions[state][newstate](p, data)
1563 1563 except KeyError:
1564 1564 raise PatchError('unhandled transition: %s -> %s' %
1565 1565 (state, newstate))
1566 1566 state = newstate
1567 1567 del fp
1568 1568 return p.finished()
1569 1569
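# Sketch (illustrative diff text): parsepatch() groups the scanpatch() events
# into header objects, each carrying its recordhunk instances.
def _example_parsepatch():
    text = ('diff --git a/f b/f\n'
            '--- a/f\n'
            '+++ b/f\n'
            '@@ -1,1 +1,1 @@\n'
            '-old\n'
            '+new\n')
    headers = parsepatch([text])
    return [(h.filename(), len(h.hunks)) for h in headers]
    # expected: [('f', 1)]
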
1570 1570 def pathtransform(path, strip, prefix):
1571 1571 '''turn a path from a patch into a path suitable for the repository
1572 1572
1573 1573 prefix, if not empty, is expected to be normalized with a / at the end.
1574 1574
1575 1575 Returns (stripped components, path in repository).
1576 1576
1577 1577 >>> pathtransform('a/b/c', 0, '')
1578 1578 ('', 'a/b/c')
1579 1579 >>> pathtransform(' a/b/c ', 0, '')
1580 1580 ('', ' a/b/c')
1581 1581 >>> pathtransform(' a/b/c ', 2, '')
1582 1582 ('a/b/', 'c')
1583 1583 >>> pathtransform('a/b/c', 0, 'd/e/')
1584 1584 ('', 'd/e/a/b/c')
1585 1585 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1586 1586 ('a//b/', 'd/e/c')
1587 1587 >>> pathtransform('a/b/c', 3, '')
1588 1588 Traceback (most recent call last):
1589 1589 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1590 1590 '''
1591 1591 pathlen = len(path)
1592 1592 i = 0
1593 1593 if strip == 0:
1594 1594 return '', prefix + path.rstrip()
1595 1595 count = strip
1596 1596 while count > 0:
1597 1597 i = path.find('/', i)
1598 1598 if i == -1:
1599 1599 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1600 1600 (count, strip, path))
1601 1601 i += 1
1602 1602 # consume '//' in the path
1603 1603 while i < pathlen - 1 and path[i] == '/':
1604 1604 i += 1
1605 1605 count -= 1
1606 1606 return path[:i].lstrip(), prefix + path[i:].rstrip()
1607 1607
1608 1608 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1609 1609 nulla = afile_orig == "/dev/null"
1610 1610 nullb = bfile_orig == "/dev/null"
1611 1611 create = nulla and hunk.starta == 0 and hunk.lena == 0
1612 1612 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1613 1613 abase, afile = pathtransform(afile_orig, strip, prefix)
1614 1614 gooda = not nulla and backend.exists(afile)
1615 1615 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1616 1616 if afile == bfile:
1617 1617 goodb = gooda
1618 1618 else:
1619 1619 goodb = not nullb and backend.exists(bfile)
1620 1620 missing = not goodb and not gooda and not create
1621 1621
1622 1622 # some diff programs apparently produce patches where the afile is
1623 1623 # not /dev/null, but afile starts with bfile
1624 1624 abasedir = afile[:afile.rfind('/') + 1]
1625 1625 bbasedir = bfile[:bfile.rfind('/') + 1]
1626 1626 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1627 1627 and hunk.starta == 0 and hunk.lena == 0):
1628 1628 create = True
1629 1629 missing = False
1630 1630
1631 1631 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1632 1632 # diff is between a file and its backup. In this case, the original
1633 1633 # file should be patched (see original mpatch code).
1634 1634 isbackup = (abase == bbase and bfile.startswith(afile))
1635 1635 fname = None
1636 1636 if not missing:
1637 1637 if gooda and goodb:
1638 1638 if isbackup:
1639 1639 fname = afile
1640 1640 else:
1641 1641 fname = bfile
1642 1642 elif gooda:
1643 1643 fname = afile
1644 1644
1645 1645 if not fname:
1646 1646 if not nullb:
1647 1647 if isbackup:
1648 1648 fname = afile
1649 1649 else:
1650 1650 fname = bfile
1651 1651 elif not nulla:
1652 1652 fname = afile
1653 1653 else:
1654 1654 raise PatchError(_("undefined source and destination files"))
1655 1655
1656 1656 gp = patchmeta(fname)
1657 1657 if create:
1658 1658 gp.op = 'ADD'
1659 1659 elif remove:
1660 1660 gp.op = 'DELETE'
1661 1661 return gp
1662 1662
1663 1663 def scanpatch(fp):
1664 1664 """like patch.iterhunks, but yield different events
1665 1665
1666 1666 - ('file', [header_lines + fromfile + tofile])
1667 1667 - ('context', [context_lines])
1668 1668 - ('hunk', [hunk_lines])
1669 1669 - ('range', (-start,len, +start,len, proc))
1670 1670 """
1671 1671 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1672 1672 lr = linereader(fp)
1673 1673
1674 1674 def scanwhile(first, p):
1675 1675 """scan lr while predicate holds"""
1676 1676 lines = [first]
1677 1677 while True:
1678 1678 line = lr.readline()
1679 1679 if not line:
1680 1680 break
1681 1681 if p(line):
1682 1682 lines.append(line)
1683 1683 else:
1684 1684 lr.push(line)
1685 1685 break
1686 1686 return lines
1687 1687
1688 1688 while True:
1689 1689 line = lr.readline()
1690 1690 if not line:
1691 1691 break
1692 1692 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1693 1693 def notheader(line):
1694 1694 s = line.split(None, 1)
1695 1695 return not s or s[0] not in ('---', 'diff')
1696 1696 header = scanwhile(line, notheader)
1697 1697 fromfile = lr.readline()
1698 1698 if fromfile.startswith('---'):
1699 1699 tofile = lr.readline()
1700 1700 header += [fromfile, tofile]
1701 1701 else:
1702 1702 lr.push(fromfile)
1703 1703 yield 'file', header
1704 1704 elif line[0] == ' ':
1705 1705 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1706 1706 elif line[0] in '-+':
1707 1707 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1708 1708 else:
1709 1709 m = lines_re.match(line)
1710 1710 if m:
1711 1711 yield 'range', m.groups()
1712 1712 else:
1713 1713 yield 'other', line
1714 1714
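# Sketch (illustrative diff text): the event sequence documented in
# scanpatch()'s docstring for a single one-hunk unified diff.
def _example_scanevents():
    text = ('diff -r 000000000000 f\n'
            '--- a/f\n'
            '+++ b/f\n'
            '@@ -1,1 +1,1 @@\n'
            '-old\n'
            '+new\n')
    return [event for event, _data in scanpatch(cStringIO.StringIO(text))]
    # expected: ['file', 'range', 'hunk']
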
1715 1715 def scangitpatch(lr, firstline):
1716 1716 """
1717 1717 Git patches can emit:
1718 1718 - rename a to b
1719 1719 - change b
1720 1720 - copy a to c
1721 1721 - change c
1722 1722
1723 1723 We cannot apply this sequence as-is: the renamed 'a' could not be
1724 1724 found because it would have been renamed already. And we cannot copy
1725 1725 from 'b' instead because 'b' would have been changed already. So
1726 1726 we scan the git patch for copy and rename commands so we can
1727 1727 perform the copies ahead of time.
1728 1728 """
1729 1729 pos = 0
1730 1730 try:
1731 1731 pos = lr.fp.tell()
1732 1732 fp = lr.fp
1733 1733 except IOError:
1734 1734 fp = cStringIO.StringIO(lr.fp.read())
1735 1735 gitlr = linereader(fp)
1736 1736 gitlr.push(firstline)
1737 1737 gitpatches = readgitpatch(gitlr)
1738 1738 fp.seek(pos)
1739 1739 return gitpatches
1740 1740
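# Sketch (not part of this changeset): the metadata scangitpatch() pre-reads
# for the rename-then-copy case described above. The patch text is a
# hypothetical example; readgitpatch() and linereader() are defined earlier
# in this module.
_fp = cStringIO.StringIO(
    'diff --git a/a b/b\n'
    'rename from a\n'
    'rename to b\n'
    'diff --git a/a b/c\n'
    'copy from a\n'
    'copy to c\n')
for _gp in readgitpatch(linereader(_fp)):
    print _gp.op, _gp.oldpath, '->', _gp.path   # RENAME a -> b, COPY a -> c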
1741 1741 def iterhunks(fp):
1742 1742 """Read a patch and yield the following events:
1743 1743 - ("file", afile, bfile, firsthunk): select a new target file.
1744 1744 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1745 1745 "file" event.
1746 1746 - ("git", gitchanges): current diff is in git format, gitchanges
1747 1747 maps filenames to gitpatch records. Unique event.
1748 1748 """
1749 1749 afile = ""
1750 1750 bfile = ""
1751 1751 state = None
1752 1752 hunknum = 0
1753 1753 emitfile = newfile = False
1754 1754 gitpatches = None
1755 1755
1756 1756 # our states
1757 1757 BFILE = 1
1758 1758 context = None
1759 1759 lr = linereader(fp)
1760 1760
1761 1761 while True:
1762 1762 x = lr.readline()
1763 1763 if not x:
1764 1764 break
1765 1765 if state == BFILE and (
1766 1766 (not context and x[0] == '@')
1767 1767 or (context is not False and x.startswith('***************'))
1768 1768 or x.startswith('GIT binary patch')):
1769 1769 gp = None
1770 1770 if (gitpatches and
1771 1771 gitpatches[-1].ispatching(afile, bfile)):
1772 1772 gp = gitpatches.pop()
1773 1773 if x.startswith('GIT binary patch'):
1774 1774 h = binhunk(lr, gp.path)
1775 1775 else:
1776 1776 if context is None and x.startswith('***************'):
1777 1777 context = True
1778 1778 h = hunk(x, hunknum + 1, lr, context)
1779 1779 hunknum += 1
1780 1780 if emitfile:
1781 1781 emitfile = False
1782 1782 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1783 1783 yield 'hunk', h
1784 1784 elif x.startswith('diff --git a/'):
1785 1785 m = gitre.match(x.rstrip(' \r\n'))
1786 1786 if not m:
1787 1787 continue
1788 1788 if gitpatches is None:
1789 1789 # scan whole input for git metadata
1790 1790 gitpatches = scangitpatch(lr, x)
1791 1791 yield 'git', [g.copy() for g in gitpatches
1792 1792 if g.op in ('COPY', 'RENAME')]
1793 1793 gitpatches.reverse()
1794 1794 afile = 'a/' + m.group(1)
1795 1795 bfile = 'b/' + m.group(2)
1796 1796 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1797 1797 gp = gitpatches.pop()
1798 1798 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1799 1799 if not gitpatches:
1800 1800 raise PatchError(_('failed to synchronize metadata for "%s"')
1801 1801 % afile[2:])
1802 1802 gp = gitpatches[-1]
1803 1803 newfile = True
1804 1804 elif x.startswith('---'):
1805 1805 # check for a unified diff
1806 1806 l2 = lr.readline()
1807 1807 if not l2.startswith('+++'):
1808 1808 lr.push(l2)
1809 1809 continue
1810 1810 newfile = True
1811 1811 context = False
1812 1812 afile = parsefilename(x)
1813 1813 bfile = parsefilename(l2)
1814 1814 elif x.startswith('***'):
1815 1815 # check for a context diff
1816 1816 l2 = lr.readline()
1817 1817 if not l2.startswith('---'):
1818 1818 lr.push(l2)
1819 1819 continue
1820 1820 l3 = lr.readline()
1821 1821 lr.push(l3)
1822 1822 if not l3.startswith("***************"):
1823 1823 lr.push(l2)
1824 1824 continue
1825 1825 newfile = True
1826 1826 context = True
1827 1827 afile = parsefilename(x)
1828 1828 bfile = parsefilename(l2)
1829 1829
1830 1830 if newfile:
1831 1831 newfile = False
1832 1832 emitfile = True
1833 1833 state = BFILE
1834 1834 hunknum = 0
1835 1835
1836 1836 while gitpatches:
1837 1837 gp = gitpatches.pop()
1838 1838 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1839 1839
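# Sketch (not part of this changeset): consuming the event stream documented
# in the docstring above, much as _applydiff() below does. The patch file
# name is hypothetical.
_fp = open('example.patch', 'rb')
try:
    for _state, _values in iterhunks(_fp):
        if _state == 'file':
            _afile, _bfile, _firsthunk, _gp = _values
        elif _state == 'hunk':
            pass   # _values is a hunk object for the current file
        elif _state == 'git':
            pass   # _values lists patchmeta records for copies and renames
finally:
    _fp.close()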
1840 1840 def applybindelta(binchunk, data):
1841 1841 """Apply a binary delta hunk
1842 1842 The algorithm used is the algorithm from git's patch-delta.c
1843 1843 """
1844 1844 def deltahead(binchunk):
1845 1845 i = 0
1846 1846 for c in binchunk:
1847 1847 i += 1
1848 1848 if not (ord(c) & 0x80):
1849 1849 return i
1850 1850 return i
1851 1851 out = ""
1852 1852 s = deltahead(binchunk)
1853 1853 binchunk = binchunk[s:]
1854 1854 s = deltahead(binchunk)
1855 1855 binchunk = binchunk[s:]
1856 1856 i = 0
1857 1857 while i < len(binchunk):
1858 1858 cmd = ord(binchunk[i])
1859 1859 i += 1
1860 1860 if (cmd & 0x80):
1861 1861 offset = 0
1862 1862 size = 0
1863 1863 if (cmd & 0x01):
1864 1864 offset = ord(binchunk[i])
1865 1865 i += 1
1866 1866 if (cmd & 0x02):
1867 1867 offset |= ord(binchunk[i]) << 8
1868 1868 i += 1
1869 1869 if (cmd & 0x04):
1870 1870 offset |= ord(binchunk[i]) << 16
1871 1871 i += 1
1872 1872 if (cmd & 0x08):
1873 1873 offset |= ord(binchunk[i]) << 24
1874 1874 i += 1
1875 1875 if (cmd & 0x10):
1876 1876 size = ord(binchunk[i])
1877 1877 i += 1
1878 1878 if (cmd & 0x20):
1879 1879 size |= ord(binchunk[i]) << 8
1880 1880 i += 1
1881 1881 if (cmd & 0x40):
1882 1882 size |= ord(binchunk[i]) << 16
1883 1883 i += 1
1884 1884 if size == 0:
1885 1885 size = 0x10000
1886 1886 offset_end = offset + size
1887 1887 out += data[offset:offset_end]
1888 1888 elif cmd != 0:
1889 1889 offset_end = i + cmd
1890 1890 out += binchunk[i:offset_end]
1891 1891 i += cmd
1892 1892 else:
1893 1893 raise PatchError(_('unexpected delta opcode 0'))
1894 1894 return out
1895 1895
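# Sketch (not part of this changeset): a hand-built delta against the 5-byte
# source 'hello'. The first two bytes are the size varints skipped by
# deltahead(); 0x91 is a copy opcode whose offset and size bytes follow, and
# 0x04 inserts the next four literal bytes. In real use the delta comes from
# a decoded 'GIT binary patch' hunk.
assert applybindelta('\x05\x07\x91\x00\x03\x04abcd', 'hello') == 'helabcd'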
1896 1896 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1897 1897 """Reads a patch from fp and tries to apply it.
1898 1898
1899 1899 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1900 1900 there was any fuzz.
1901 1901
1902 1902 If 'eolmode' is 'strict', the patch content and patched file are
1903 1903 read in binary mode. Otherwise, line endings are ignored when
1904 1904 patching then normalized according to 'eolmode'.
1905 1905 """
1906 1906 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1907 1907 prefix=prefix, eolmode=eolmode)
1908 1908
1909 1909 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1910 1910 eolmode='strict'):
1911 1911
1912 1912 if prefix:
1913 1913 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1914 1914 prefix)
1915 1915 if prefix != '':
1916 1916 prefix += '/'
1917 1917 def pstrip(p):
1918 1918 return pathtransform(p, strip - 1, prefix)[1]
1919 1919
1920 1920 rejects = 0
1921 1921 err = 0
1922 1922 current_file = None
1923 1923
1924 1924 for state, values in iterhunks(fp):
1925 1925 if state == 'hunk':
1926 1926 if not current_file:
1927 1927 continue
1928 1928 ret = current_file.apply(values)
1929 1929 if ret > 0:
1930 1930 err = 1
1931 1931 elif state == 'file':
1932 1932 if current_file:
1933 1933 rejects += current_file.close()
1934 1934 current_file = None
1935 1935 afile, bfile, first_hunk, gp = values
1936 1936 if gp:
1937 1937 gp.path = pstrip(gp.path)
1938 1938 if gp.oldpath:
1939 1939 gp.oldpath = pstrip(gp.oldpath)
1940 1940 else:
1941 1941 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1942 1942 prefix)
1943 1943 if gp.op == 'RENAME':
1944 1944 backend.unlink(gp.oldpath)
1945 1945 if not first_hunk:
1946 1946 if gp.op == 'DELETE':
1947 1947 backend.unlink(gp.path)
1948 1948 continue
1949 1949 data, mode = None, None
1950 1950 if gp.op in ('RENAME', 'COPY'):
1951 1951 data, mode = store.getfile(gp.oldpath)[:2]
1952 1952 # FIXME: failing getfile has never been handled here
1953 1953 assert data is not None
1954 1954 if gp.mode:
1955 1955 mode = gp.mode
1956 1956 if gp.op == 'ADD':
1957 1957 # Added files without content have no hunk and
1958 1958 # must be created
1959 1959 data = ''
1960 1960 if data or mode:
1961 1961 if (gp.op in ('ADD', 'RENAME', 'COPY')
1962 1962 and backend.exists(gp.path)):
1963 1963 raise PatchError(_("cannot create %s: destination "
1964 1964 "already exists") % gp.path)
1965 1965 backend.setfile(gp.path, data, mode, gp.oldpath)
1966 1966 continue
1967 1967 try:
1968 1968 current_file = patcher(ui, gp, backend, store,
1969 1969 eolmode=eolmode)
1970 1970 except PatchError as inst:
1971 1971 ui.warn(str(inst) + '\n')
1972 1972 current_file = None
1973 1973 rejects += 1
1974 1974 continue
1975 1975 elif state == 'git':
1976 1976 for gp in values:
1977 1977 path = pstrip(gp.oldpath)
1978 1978 data, mode = backend.getfile(path)
1979 1979 if data is None:
1980 1980 # The error ignored here will trigger a getfile()
1981 1981 # error in a place more appropriate for error
1982 1982 # handling, and will not interrupt the patching
1983 1983 # process.
1984 1984 pass
1985 1985 else:
1986 1986 store.setfile(path, data, mode)
1987 1987 else:
1988 1988 raise error.Abort(_('unsupported parser state: %s') % state)
1989 1989
1990 1990 if current_file:
1991 1991 rejects += current_file.close()
1992 1992
1993 1993 if rejects:
1994 1994 return -1
1995 1995 return err
1996 1996
1997 1997 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1998 1998 similarity):
1999 1999 """use <patcher> to apply <patchname> to the working directory.
2000 2000 returns whether patch was applied with fuzz factor."""
2001 2001
2002 2002 fuzz = False
2003 2003 args = []
2004 2004 cwd = repo.root
2005 2005 if cwd:
2006 2006 args.append('-d %s' % util.shellquote(cwd))
2007 2007 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2008 2008 util.shellquote(patchname)))
2009 2009 try:
2010 2010 for line in fp:
2011 2011 line = line.rstrip()
2012 2012 ui.note(line + '\n')
2013 2013 if line.startswith('patching file '):
2014 2014 pf = util.parsepatchoutput(line)
2015 2015 printed_file = False
2016 2016 files.add(pf)
2017 2017 elif line.find('with fuzz') >= 0:
2018 2018 fuzz = True
2019 2019 if not printed_file:
2020 2020 ui.warn(pf + '\n')
2021 2021 printed_file = True
2022 2022 ui.warn(line + '\n')
2023 2023 elif line.find('saving rejects to file') >= 0:
2024 2024 ui.warn(line + '\n')
2025 2025 elif line.find('FAILED') >= 0:
2026 2026 if not printed_file:
2027 2027 ui.warn(pf + '\n')
2028 2028 printed_file = True
2029 2029 ui.warn(line + '\n')
2030 2030 finally:
2031 2031 if files:
2032 2032 scmutil.marktouched(repo, files, similarity)
2033 2033 code = fp.close()
2034 2034 if code:
2035 2035 raise PatchError(_("patch command failed: %s") %
2036 2036 util.explainexit(code)[0])
2037 2037 return fuzz
2038 2038
2039 2039 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2040 2040 eolmode='strict'):
2041 2041 if files is None:
2042 2042 files = set()
2043 2043 if eolmode is None:
2044 2044 eolmode = ui.config('patch', 'eol', 'strict')
2045 2045 if eolmode.lower() not in eolmodes:
2046 2046 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2047 2047 eolmode = eolmode.lower()
2048 2048
2049 2049 store = filestore()
2050 2050 try:
2051 2051 fp = open(patchobj, 'rb')
2052 2052 except TypeError:
2053 2053 fp = patchobj
2054 2054 try:
2055 2055 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2056 2056 eolmode=eolmode)
2057 2057 finally:
2058 2058 if fp != patchobj:
2059 2059 fp.close()
2060 2060 files.update(backend.close())
2061 2061 store.close()
2062 2062 if ret < 0:
2063 2063 raise PatchError(_('patch failed to apply'))
2064 2064 return ret > 0
2065 2065
2066 2066 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2067 2067 eolmode='strict', similarity=0):
2068 2068 """use builtin patch to apply <patchobj> to the working directory.
2069 2069 returns whether patch was applied with fuzz factor."""
2070 2070 backend = workingbackend(ui, repo, similarity)
2071 2071 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2072 2072
2073 2073 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2074 2074 eolmode='strict'):
2075 2075 backend = repobackend(ui, repo, ctx, store)
2076 2076 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2077 2077
2078 2078 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2079 2079 similarity=0):
2080 2080 """Apply <patchname> to the working directory.
2081 2081
2082 2082 'eolmode' specifies how end of lines should be handled. It can be:
2083 2083 - 'strict': inputs are read in binary mode, EOLs are preserved
2084 2084 - 'crlf': EOLs are ignored when patching and reset to CRLF
2085 2085 - 'lf': EOLs are ignored when patching and reset to LF
2086 2086 - None: get it from user settings, default to 'strict'
2087 2087 'eolmode' is ignored when using an external patcher program.
2088 2088
2089 2089 Returns whether patch was applied with fuzz factor.
2090 2090 """
2091 2091 patcher = ui.config('ui', 'patch')
2092 2092 if files is None:
2093 2093 files = set()
2094 2094 if patcher:
2095 2095 return _externalpatch(ui, repo, patcher, patchname, strip,
2096 2096 files, similarity)
2097 2097 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2098 2098 similarity)
2099 2099
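# Sketch (not part of this changeset): the usual entry point for applying a
# patch file to the working directory. 'fix.patch' is a hypothetical path;
# ui and repo come from the calling command.
_touched = set()
_fuzz = patch(ui, repo, 'fix.patch', strip=1, files=_touched, eolmode=None)
# _touched now holds every path the patch modified; _fuzz is True if any
# hunk applied with fuzz.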
2100 2100 def changedfiles(ui, repo, patchpath, strip=1):
2101 2101 backend = fsbackend(ui, repo.root)
2102 2102 with open(patchpath, 'rb') as fp:
2103 2103 changed = set()
2104 2104 for state, values in iterhunks(fp):
2105 2105 if state == 'file':
2106 2106 afile, bfile, first_hunk, gp = values
2107 2107 if gp:
2108 2108 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2109 2109 if gp.oldpath:
2110 2110 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2111 2111 else:
2112 2112 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2113 2113 '')
2114 2114 changed.add(gp.path)
2115 2115 if gp.op == 'RENAME':
2116 2116 changed.add(gp.oldpath)
2117 2117 elif state not in ('hunk', 'git'):
2118 2118 raise error.Abort(_('unsupported parser state: %s') % state)
2119 2119 return changed
2120 2120
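# Sketch (not part of this changeset): previewing which files a patch would
# touch without applying it. The path is hypothetical; ui and repo come from
# the caller.
for _f in sorted(changedfiles(ui, repo, 'fix.patch', strip=1)):
    ui.write('%s\n' % _f)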
2121 2121 class GitDiffRequired(Exception):
2122 2122 pass
2123 2123
2124 2124 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2125 2125 '''return diffopts with all features supported and parsed'''
2126 2126 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2127 2127 git=True, whitespace=True, formatchanging=True)
2128 2128
2129 2129 diffopts = diffallopts
2130 2130
2131 2131 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2132 2132 whitespace=False, formatchanging=False):
2133 2133 '''return diffopts with only opted-in features parsed
2134 2134
2135 2135 Features:
2136 2136 - git: git-style diffs
2137 2137 - whitespace: whitespace options like ignoreblanklines and ignorews
2138 2138 - formatchanging: options that will likely break or cause correctness issues
2139 2139 with most diff parsers
2140 2140 '''
2141 2141 def get(key, name=None, getter=ui.configbool, forceplain=None):
2142 2142 if opts:
2143 2143 v = opts.get(key)
2144 2144 if v:
2145 2145 return v
2146 2146 if forceplain is not None and ui.plain():
2147 2147 return forceplain
2148 2148 return getter(section, name or key, None, untrusted=untrusted)
2149 2149
2150 2150 # core options, expected to be understood by every diff parser
2151 2151 buildopts = {
2152 2152 'nodates': get('nodates'),
2153 2153 'showfunc': get('show_function', 'showfunc'),
2154 2154 'context': get('unified', getter=ui.config),
2155 2155 }
2156 2156
2157 2157 if git:
2158 2158 buildopts['git'] = get('git')
2159 2159 if whitespace:
2160 2160 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2161 2161 buildopts['ignorewsamount'] = get('ignore_space_change',
2162 2162 'ignorewsamount')
2163 2163 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2164 2164 'ignoreblanklines')
2165 2165 if formatchanging:
2166 2166 buildopts['text'] = opts and opts.get('text')
2167 2167 buildopts['nobinary'] = get('nobinary', forceplain=False)
2168 2168 buildopts['noprefix'] = get('noprefix', forceplain=False)
2169 2169
2170 2170 return mdiff.diffopts(**buildopts)
2171 2171
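# Sketch (not part of this changeset): callers that feed the output to an
# external diff parser opt in feature by feature instead of taking
# everything via diffallopts(). ui comes from the caller.
_full = diffallopts(ui)                        # git, whitespace, formatchanging
_safe = difffeatureopts(ui, whitespace=True)   # whitespace knobs only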
2172 2172 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2173 2173 losedatafn=None, prefix='', relroot=''):
2174 2174 '''yields diff of changes to files between two nodes, or node and
2175 2175 working directory.
2176 2176
2177 2177 if node1 is None, use first dirstate parent instead.
2178 2178 if node2 is None, compare node1 with working directory.
2179 2179
2180 2180 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2181 2181 every time some change cannot be represented with the current
2182 2182 patch format. Return False to upgrade to git patch format, True to
2183 2183 accept the loss or raise an exception to abort the diff. It is
2184 2184     called with the name of the current file being diffed as 'fn'. If set
2185 2185 to None, patches will always be upgraded to git format when
2186 2186 necessary.
2187 2187
2188 2188 prefix is a filename prefix that is prepended to all filenames on
2189 2189 display (used for subrepos).
2190 2190
2191 2191 relroot, if not empty, must be normalized with a trailing /. Any match
2192 2192 patterns that fall outside it will be ignored.'''
2193 2193
2194 2194 if opts is None:
2195 2195 opts = mdiff.defaultopts
2196 2196
2197 2197 if not node1 and not node2:
2198 2198 node1 = repo.dirstate.p1()
2199 2199
2200 2200 def lrugetfilectx():
2201 2201 cache = {}
2202 2202 order = collections.deque()
2203 2203 def getfilectx(f, ctx):
2204 2204 fctx = ctx.filectx(f, filelog=cache.get(f))
2205 2205 if f not in cache:
2206 2206 if len(cache) > 20:
2207 2207 del cache[order.popleft()]
2208 2208 cache[f] = fctx.filelog()
2209 2209 else:
2210 2210 order.remove(f)
2211 2211 order.append(f)
2212 2212 return fctx
2213 2213 return getfilectx
2214 2214 getfilectx = lrugetfilectx()
2215 2215
2216 2216 ctx1 = repo[node1]
2217 2217 ctx2 = repo[node2]
2218 2218
2219 2219 relfiltered = False
2220 2220 if relroot != '' and match.always():
2221 2221 # as a special case, create a new matcher with just the relroot
2222 2222 pats = [relroot]
2223 2223 match = scmutil.match(ctx2, pats, default='path')
2224 2224 relfiltered = True
2225 2225
2226 2226 if not changes:
2227 2227 changes = repo.status(ctx1, ctx2, match=match)
2228 2228 modified, added, removed = changes[:3]
2229 2229
2230 2230 if not modified and not added and not removed:
2231 2231 return []
2232 2232
2233 2233 if repo.ui.debugflag:
2234 2234 hexfunc = hex
2235 2235 else:
2236 2236 hexfunc = short
2237 2237 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2238 2238
2239 2239 copy = {}
2240 2240 if opts.git or opts.upgrade:
2241 2241 copy = copies.pathcopies(ctx1, ctx2, match=match)
2242 2242
2243 2243 if relroot is not None:
2244 2244 if not relfiltered:
2245 2245 # XXX this would ideally be done in the matcher, but that is
2246 2246 # generally meant to 'or' patterns, not 'and' them. In this case we
2247 2247 # need to 'and' all the patterns from the matcher with relroot.
2248 2248 def filterrel(l):
2249 2249 return [f for f in l if f.startswith(relroot)]
2250 2250 modified = filterrel(modified)
2251 2251 added = filterrel(added)
2252 2252 removed = filterrel(removed)
2253 2253 relfiltered = True
2254 2254 # filter out copies where either side isn't inside the relative root
2255 2255 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2256 2256 if dst.startswith(relroot)
2257 2257 and src.startswith(relroot)))
2258 2258
2259 2259 modifiedset = set(modified)
2260 2260 addedset = set(added)
2261 2261 removedset = set(removed)
2262 2262 for f in modified:
2263 2263 if f not in ctx1:
2264 2264 # Fix up added, since merged-in additions appear as
2265 2265 # modifications during merges
2266 2266 modifiedset.remove(f)
2267 2267 addedset.add(f)
2268 2268 for f in removed:
2269 2269 if f not in ctx1:
2270 2270 # Merged-in additions that are then removed are reported as removed.
2271 2271 # They are not in ctx1, so We don't want to show them in the diff.
2272 2272 removedset.remove(f)
2273 2273 modified = sorted(modifiedset)
2274 2274 added = sorted(addedset)
2275 2275 removed = sorted(removedset)
2276 for dst, src in copy.items():
2277 if src not in ctx1:
2278 # Files merged in during a merge and then copied/renamed are
2279 # reported as copies. We want to show them in the diff as additions.
2280 del copy[dst]
2276 2281
2277 2282 def difffn(opts, losedata):
2278 2283 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2279 2284 copy, getfilectx, opts, losedata, prefix, relroot)
2280 2285 if opts.upgrade and not opts.git:
2281 2286 try:
2282 2287 def losedata(fn):
2283 2288 if not losedatafn or not losedatafn(fn=fn):
2284 2289 raise GitDiffRequired
2285 2290 # Buffer the whole output until we are sure it can be generated
2286 2291 return list(difffn(opts.copy(git=False), losedata))
2287 2292 except GitDiffRequired:
2288 2293 return difffn(opts.copy(git=True), None)
2289 2294 else:
2290 2295 return difffn(opts, None)
2291 2296
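# Sketch (not part of this changeset): rendering the chunk generator returned
# by diff() into one string, comparing the working directory against its
# first parent with git-style output. ui and repo come from the caller.
_opts = diffallopts(ui, {'git': True})
_text = ''.join(diff(repo, opts=_opts))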
2292 2297 def difflabel(func, *args, **kw):
2293 2298 '''yields 2-tuples of (output, label) based on the output of func()'''
2294 2299 headprefixes = [('diff', 'diff.diffline'),
2295 2300 ('copy', 'diff.extended'),
2296 2301 ('rename', 'diff.extended'),
2297 2302 ('old', 'diff.extended'),
2298 2303 ('new', 'diff.extended'),
2299 2304 ('deleted', 'diff.extended'),
2300 2305 ('---', 'diff.file_a'),
2301 2306 ('+++', 'diff.file_b')]
2302 2307 textprefixes = [('@', 'diff.hunk'),
2303 2308 ('-', 'diff.deleted'),
2304 2309 ('+', 'diff.inserted')]
2305 2310 head = False
2306 2311 for chunk in func(*args, **kw):
2307 2312 lines = chunk.split('\n')
2308 2313 for i, line in enumerate(lines):
2309 2314 if i != 0:
2310 2315 yield ('\n', '')
2311 2316 if head:
2312 2317 if line.startswith('@'):
2313 2318 head = False
2314 2319 else:
2315 2320 if line and line[0] not in ' +-@\\':
2316 2321 head = True
2317 2322 stripline = line
2318 2323 diffline = False
2319 2324 if not head and line and line[0] in '+-':
2320 2325 # highlight tabs and trailing whitespace, but only in
2321 2326 # changed lines
2322 2327 stripline = line.rstrip()
2323 2328 diffline = True
2324 2329
2325 2330 prefixes = textprefixes
2326 2331 if head:
2327 2332 prefixes = headprefixes
2328 2333 for prefix, label in prefixes:
2329 2334 if stripline.startswith(prefix):
2330 2335 if diffline:
2331 2336 for token in tabsplitter.findall(stripline):
2332 2337 if '\t' == token[0]:
2333 2338 yield (token, 'diff.tab')
2334 2339 else:
2335 2340 yield (token, label)
2336 2341 else:
2337 2342 yield (stripline, label)
2338 2343 break
2339 2344 else:
2340 2345 yield (line, '')
2341 2346 if line != stripline:
2342 2347 yield (line[len(stripline):], 'diff.trailingwhitespace')
2343 2348
2344 2349 def diffui(*args, **kw):
2345 2350 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2346 2351 return difflabel(diff, *args, **kw)
2347 2352
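
# Sketch (not part of this changeset): colored diff output is just the
# labelled stream from diffui() written through the ui. ui and repo come
# from the caller.
for _chunk, _label in diffui(repo, opts=diffallopts(ui)):
    ui.write(_chunk, label=_label)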
2348 2353 def _filepairs(modified, added, removed, copy, opts):
2349 2354 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2350 2355     before and f2 is the name after. For added files, f1 will be None,
2351 2356 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2352 2357 or 'rename' (the latter two only if opts.git is set).'''
2353 2358 gone = set()
2354 2359
2355 2360 copyto = dict([(v, k) for k, v in copy.items()])
2356 2361
2357 2362 addedset, removedset = set(added), set(removed)
2358 2363
2359 2364 for f in sorted(modified + added + removed):
2360 2365 copyop = None
2361 2366 f1, f2 = f, f
2362 2367 if f in addedset:
2363 2368 f1 = None
2364 2369 if f in copy:
2365 2370 if opts.git:
2366 2371 f1 = copy[f]
2367 2372 if f1 in removedset and f1 not in gone:
2368 2373 copyop = 'rename'
2369 2374 gone.add(f1)
2370 2375 else:
2371 2376 copyop = 'copy'
2372 2377 elif f in removedset:
2373 2378 f2 = None
2374 2379 if opts.git:
2375 2380 # have we already reported a copy above?
2376 2381 if (f in copyto and copyto[f] in addedset
2377 2382 and copy[copyto[f]] == f):
2378 2383 continue
2379 2384 yield f1, f2, copyop
2380 2385
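# Sketch (not part of this changeset): with git diffs enabled, a rename is
# emitted as a single pair instead of an add plus a remove. The file names
# are hypothetical.
_copy = {'new.txt': 'old.txt'}   # dst -> src, as copies.pathcopies() returns
_pairs = list(_filepairs([], ['new.txt'], ['old.txt'], _copy,
                         mdiff.diffopts(git=True)))
# _pairs == [('old.txt', 'new.txt', 'rename')]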
2381 2386 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2382 2387 copy, getfilectx, opts, losedatafn, prefix, relroot):
2383 2388 '''given input data, generate a diff and yield it in blocks
2384 2389
2385 2390 If generating a diff would lose data like flags or binary data and
2386 2391 losedatafn is not None, it will be called.
2387 2392
2388 2393 relroot is removed and prefix is added to every path in the diff output.
2389 2394
2390 2395 If relroot is not empty, this function expects every path in modified,
2391 2396 added, removed and copy to start with it.'''
2392 2397
2393 2398 def gitindex(text):
2394 2399 if not text:
2395 2400 text = ""
2396 2401 l = len(text)
2397 2402 s = util.sha1('blob %d\0' % l)
2398 2403 s.update(text)
2399 2404 return s.hexdigest()
2400 2405
2401 2406 if opts.noprefix:
2402 2407 aprefix = bprefix = ''
2403 2408 else:
2404 2409 aprefix = 'a/'
2405 2410 bprefix = 'b/'
2406 2411
2407 2412 def diffline(f, revs):
2408 2413 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2409 2414 return 'diff %s %s' % (revinfo, f)
2410 2415
2411 2416 date1 = util.datestr(ctx1.date())
2412 2417 date2 = util.datestr(ctx2.date())
2413 2418
2414 2419 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2415 2420
2416 2421 if relroot != '' and (repo.ui.configbool('devel', 'all')
2417 2422 or repo.ui.configbool('devel', 'check-relroot')):
2418 2423 for f in modified + added + removed + copy.keys() + copy.values():
2419 2424 if f is not None and not f.startswith(relroot):
2420 2425 raise AssertionError(
2421 2426 "file %s doesn't start with relroot %s" % (f, relroot))
2422 2427
2423 2428 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2424 2429 content1 = None
2425 2430 content2 = None
2426 2431 flag1 = None
2427 2432 flag2 = None
2428 2433 if f1:
2429 2434 content1 = getfilectx(f1, ctx1).data()
2430 2435 if opts.git or losedatafn:
2431 2436 flag1 = ctx1.flags(f1)
2432 2437 if f2:
2433 2438 content2 = getfilectx(f2, ctx2).data()
2434 2439 if opts.git or losedatafn:
2435 2440 flag2 = ctx2.flags(f2)
2436 2441 binary = False
2437 2442 if opts.git or losedatafn:
2438 2443 binary = util.binary(content1) or util.binary(content2)
2439 2444
2440 2445 if losedatafn and not opts.git:
2441 2446 if (binary or
2442 2447 # copy/rename
2443 2448 f2 in copy or
2444 2449 # empty file creation
2445 2450 (not f1 and not content2) or
2446 2451 # empty file deletion
2447 2452 (not content1 and not f2) or
2448 2453 # create with flags
2449 2454 (not f1 and flag2) or
2450 2455 # change flags
2451 2456 (f1 and f2 and flag1 != flag2)):
2452 2457 losedatafn(f2 or f1)
2453 2458
2454 2459 path1 = f1 or f2
2455 2460 path2 = f2 or f1
2456 2461 path1 = posixpath.join(prefix, path1[len(relroot):])
2457 2462 path2 = posixpath.join(prefix, path2[len(relroot):])
2458 2463 header = []
2459 2464 if opts.git:
2460 2465 header.append('diff --git %s%s %s%s' %
2461 2466 (aprefix, path1, bprefix, path2))
2462 2467 if not f1: # added
2463 2468 header.append('new file mode %s' % gitmode[flag2])
2464 2469 elif not f2: # removed
2465 2470 header.append('deleted file mode %s' % gitmode[flag1])
2466 2471 else: # modified/copied/renamed
2467 2472 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2468 2473 if mode1 != mode2:
2469 2474 header.append('old mode %s' % mode1)
2470 2475 header.append('new mode %s' % mode2)
2471 2476 if copyop is not None:
2472 2477 header.append('%s from %s' % (copyop, path1))
2473 2478 header.append('%s to %s' % (copyop, path2))
2474 2479 elif revs and not repo.ui.quiet:
2475 2480 header.append(diffline(path1, revs))
2476 2481
2477 2482 if binary and opts.git and not opts.nobinary:
2478 2483 text = mdiff.b85diff(content1, content2)
2479 2484 if text:
2480 2485 header.append('index %s..%s' %
2481 2486 (gitindex(content1), gitindex(content2)))
2482 2487 else:
2483 2488 text = mdiff.unidiff(content1, date1,
2484 2489 content2, date2,
2485 2490 path1, path2, opts=opts)
2486 2491 if header and (text or len(header) > 1):
2487 2492 yield '\n'.join(header) + '\n'
2488 2493 if text:
2489 2494 yield text
2490 2495
2491 2496 def diffstatsum(stats):
2492 2497 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2493 2498 for f, a, r, b in stats:
2494 2499 maxfile = max(maxfile, encoding.colwidth(f))
2495 2500 maxtotal = max(maxtotal, a + r)
2496 2501 addtotal += a
2497 2502 removetotal += r
2498 2503 binary = binary or b
2499 2504
2500 2505 return maxfile, maxtotal, addtotal, removetotal, binary
2501 2506
2502 2507 def diffstatdata(lines):
2503 2508 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2504 2509
2505 2510 results = []
2506 2511 filename, adds, removes, isbinary = None, 0, 0, False
2507 2512
2508 2513 def addresult():
2509 2514 if filename:
2510 2515 results.append((filename, adds, removes, isbinary))
2511 2516
2512 2517 for line in lines:
2513 2518 if line.startswith('diff'):
2514 2519 addresult()
2515 2520 # set numbers to 0 anyway when starting new file
2516 2521 adds, removes, isbinary = 0, 0, False
2517 2522 if line.startswith('diff --git a/'):
2518 2523 filename = gitre.search(line).group(2)
2519 2524 elif line.startswith('diff -r'):
2520 2525 # format: "diff -r ... -r ... filename"
2521 2526 filename = diffre.search(line).group(1)
2522 2527 elif line.startswith('+') and not line.startswith('+++ '):
2523 2528 adds += 1
2524 2529 elif line.startswith('-') and not line.startswith('--- '):
2525 2530 removes += 1
2526 2531 elif (line.startswith('GIT binary patch') or
2527 2532 line.startswith('Binary file')):
2528 2533 isbinary = True
2529 2534 addresult()
2530 2535 return results
2531 2536
2532 2537 def diffstat(lines, width=80, git=False):
2533 2538 output = []
2534 2539 stats = diffstatdata(lines)
2535 2540 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2536 2541
2537 2542 countwidth = len(str(maxtotal))
2538 2543 if hasbinary and countwidth < 3:
2539 2544 countwidth = 3
2540 2545 graphwidth = width - countwidth - maxname - 6
2541 2546 if graphwidth < 10:
2542 2547 graphwidth = 10
2543 2548
2544 2549 def scale(i):
2545 2550 if maxtotal <= graphwidth:
2546 2551 return i
2547 2552 # If diffstat runs out of room it doesn't print anything,
2548 2553 # which isn't very useful, so always print at least one + or -
2549 2554 # if there were at least some changes.
2550 2555 return max(i * graphwidth // maxtotal, int(bool(i)))
2551 2556
2552 2557 for filename, adds, removes, isbinary in stats:
2553 2558 if isbinary:
2554 2559 count = 'Bin'
2555 2560 else:
2556 2561 count = adds + removes
2557 2562 pluses = '+' * scale(adds)
2558 2563 minuses = '-' * scale(removes)
2559 2564 output.append(' %s%s | %*s %s%s\n' %
2560 2565 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2561 2566 countwidth, count, pluses, minuses))
2562 2567
2563 2568 if stats:
2564 2569 output.append(_(' %d files changed, %d insertions(+), '
2565 2570 '%d deletions(-)\n')
2566 2571 % (len(stats), totaladds, totalremoves))
2567 2572
2568 2573 return ''.join(output)
2569 2574
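# Sketch (not part of this changeset): the stat helpers work on already
# rendered diff lines, so any diff() output can be summarized after the
# fact. The sample lines are hypothetical.
_sample = ['diff -r abc123 -r def456 foo.c\n',
           '--- a/foo.c\n', '+++ b/foo.c\n',
           '@@ -1,1 +1,1 @@\n', '-old\n', '+new\n']
# diffstatdata(_sample) == [('foo.c', 1, 1, False)]
print diffstat(_sample, width=40)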
2570 2575 def diffstatui(*args, **kw):
2571 2576 '''like diffstat(), but yields 2-tuples of (output, label) for
2572 2577 ui.write()
2573 2578 '''
2574 2579
2575 2580 for line in diffstat(*args, **kw).splitlines():
2576 2581 if line and line[-1] in '+-':
2577 2582 name, graph = line.rsplit(' ', 1)
2578 2583 yield (name + ' ', '')
2579 2584 m = re.search(r'\++', graph)
2580 2585 if m:
2581 2586 yield (m.group(0), 'diffstat.inserted')
2582 2587 m = re.search(r'-+', graph)
2583 2588 if m:
2584 2589 yield (m.group(0), 'diffstat.deleted')
2585 2590 else:
2586 2591 yield (line, '')
2587 2592 yield ('\n', '')
@@ -1,70 +1,77 b''
1 1 $ hg init
2 2 $ touch a
3 3 $ hg add a
4 4 $ hg ci -m "a"
5 5
6 6 $ echo 123 > b
7 7 $ hg add b
8 8 $ hg diff --nodates
9 9 diff -r 3903775176ed b
10 10 --- /dev/null
11 11 +++ b/b
12 12 @@ -0,0 +1,1 @@
13 13 +123
14 14
15 15 $ hg diff --nodates -r tip
16 16 diff -r 3903775176ed b
17 17 --- /dev/null
18 18 +++ b/b
19 19 @@ -0,0 +1,1 @@
20 20 +123
21 21
22 22 $ echo foo > a
23 23 $ hg diff --nodates
24 24 diff -r 3903775176ed a
25 25 --- a/a
26 26 +++ b/a
27 27 @@ -0,0 +1,1 @@
28 28 +foo
29 29 diff -r 3903775176ed b
30 30 --- /dev/null
31 31 +++ b/b
32 32 @@ -0,0 +1,1 @@
33 33 +123
34 34
35 35 $ hg diff -r ""
36 36 hg: parse error: empty query
37 37 [255]
38 38 $ hg diff -r tip -r ""
39 39 hg: parse error: empty query
40 40 [255]
41 41
42 42 Remove a file that was added via merge. Since the file is not in parent 1,
43 43 it should not be in the diff.
44 44
45 45 $ hg ci -m 'a=foo' a
46 46 $ hg co -Cq null
47 47 $ echo 123 > b
48 48 $ hg add b
49 49 $ hg ci -m "b"
50 50 created new head
51 51 $ hg merge 1
52 52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 53 (branch merge, don't forget to commit)
54 54 $ hg rm -f a
55 55 $ hg diff --nodates
56 56
57 57 Rename a file that was added via merge. Since the rename source is not in
58 58 parent 1, the diff should be relative to /dev/null
59 59
60 60 $ hg co -Cq 2
61 61 $ hg merge 1
62 62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 63 (branch merge, don't forget to commit)
64 64 $ hg mv a a2
65 65 $ hg diff --nodates
66 66 diff -r cf44b38435e5 a2
67 67 --- /dev/null
68 68 +++ b/a2
69 69 @@ -0,0 +1,1 @@
70 70 +foo
71 $ hg diff --nodates --git
72 diff --git a/a2 b/a2
73 new file mode 100644
74 --- /dev/null
75 +++ b/a2
76 @@ -0,0 +1,1 @@
77 +foo