##// END OF EJS Templates
patch: reverse _inlinediff output for consistency
Matthieu Laneuville -
r35311:10cce12f default
parent child Browse files
Show More
@@ -1,2895 +1,2895 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import copy
13 13 import difflib
14 14 import email
15 15 import errno
16 16 import hashlib
17 17 import os
18 18 import posixpath
19 19 import re
20 20 import shutil
21 21 import tempfile
22 22 import zlib
23 23
24 24 from .i18n import _
25 25 from .node import (
26 26 hex,
27 27 short,
28 28 )
29 29 from . import (
30 30 copies,
31 31 encoding,
32 32 error,
33 33 mail,
34 34 mdiff,
35 35 pathutil,
36 36 policy,
37 37 pycompat,
38 38 scmutil,
39 39 similar,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# Matches the two file names in a git-style diff header line.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into alternating runs of tabs and non-tabs (used when
# highlighting whitespace in diffs).
tabsplitter = re.compile(br'(\t+|[^\t]+)')

# Re-exported for convenience; callers historically import it from here.
PatchError = error.PatchError
51 51
52 52 # public functions
53 53
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # Heuristic: does this line look like an RFC-2822 style header
        # ("Key: value"), taking header continuation lines into account?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # Materialize accumulated lines as a file-like object.
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # Split on "# HG changeset patch" markers (hg export bundles).
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # Split an mbox on "From " separators, recursing into each message.
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # Delegate splitting of a MIME message to the email parser,
        # yielding only text-ish parts.
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Split by hand on runs of header-looking lines.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: everything read so far is a single plain patch.
        yield chunk(cur)

    class fiter(object):
        # Adapts a readline-only object into an iterator of lines.
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Sniff the input to decide which splitting strategy applies.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
182 182
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# extract() copies "# <header> <value>" lines from hg patch headers into
# its result dict under the corresponding key.
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
189 189
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # The diff body is spooled into a temp file whose name is returned in
    # data['filename']; the caller owns (and must unlink) it.
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            # Strip a leading "[PATCH n/m]"-style tag from the subject.
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # Everything before the diff start is commit message and/or
                # an "# HG changeset patch" header block.
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # Extensible headers, see patchheadermap above.
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # Conventional patch separator: the rest is cruft.
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # No diff found: nothing useful in the temp file, drop it.
        os.unlink(tmpname)
    return data
310 310
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Decode a git octal file mode into the (islink, isexec) pair.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # Return an independent duplicate of this metadata record.
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # Does 'afile' (the "---" side) refer to this file?
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # Does 'bfile' (the "+++" side) refer to this file?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # True when both sides of a hunk header name this file.
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
356 356
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # Hunks begin here; metadata for this file is complete.
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # The mode is the trailing 6-char octal string, e.g. 100755.
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    # Flush the last file if the patch ended without hunks for it.
    if gp:
        gitpatches.append(gp)

    return gitpatches
400 400
class linereader(object):
    # Simple wrapper allowing lines to be pushed back onto the input
    # stream and re-read later.
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # Queue a line to be returned ahead of the underlying stream.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # Pushed-back lines are served first, oldest first.
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # Iterate until the underlying stream is exhausted.
        return iter(self.readline, '')
420 420
class abstractbackend(object):
    """Interface for the destinations a patch can be applied to
    (filesystem, working copy, in-memory store, ...)."""
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total is the total number of hunks for
        this file.
        """
        # Intentionally a no-op by default: backends without a place to
        # store reject files simply drop them.

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
454 454
class fsbackend(abstractbackend):
    """Backend applying patches directly to the filesystem through a vfs
    rooted at basedir."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # For symlinks the "data" is the link target.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # Missing file: signalled with the (None, None) convention.
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Mode-only change: leave the content alone.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
503 503
class workingbackend(fsbackend):
    """fsbackend variant that also records copies, removals and changes
    so the repository dirstate can be updated on close()."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to patch files that exist but are not tracked.
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # Propagate the recorded operations into the dirstate and return
        # the sorted list of touched files.
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
547 547
class filestore(object):
    """Accumulate patched file contents in memory, spilling to a
    temporary directory once maxsize bytes are held (default 4MB; a
    negative maxsize disables spilling)."""

    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        keepinmemory = (self.maxsize < 0
                        or (len(data) + self.size) <= self.maxsize)
        if keepinmemory:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        # Spill to disk, creating the temporary directory lazily.
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        fn = str(self.created)
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        # Return (data, mode, copied), or (None, None, None) when unknown.
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            fn, mode, copied = self.files[fname]
            return self.opener.read(fn), mode, copied
        return None, None, None

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
584 584
class repobackend(abstractbackend):
    """Backend applying patches against a repository changectx, buffering
    results in a filestore instead of touching the working directory."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # Missing file: same (None, None) convention as fsbackend.
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Mode-only change: keep the current content.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # Every file either changed or removed by this patch.
        return self.changed | self.removed
626 626
# Hunk header descriptors. Raw strings are required: '\d', '\+' and '\*'
# are invalid escape sequences in ordinary string literals (deprecated
# since Python 3.6, a syntax error from 3.12).
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Supported end-of-line handling modes for patched files.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
631 631
class patchfile(object):
    """Per-file patching state: target lines, EOL normalization,
    offset/fuzz tracking and reject collection."""
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # Copies/renames read their base content from the store, plain
        # modifications from the backend.
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines to the backend, restoring the requested EOLs."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print "patching file ..." once, as a warning or a note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h; return the fuzz used on success (0 for a clean
        apply) or -1 when the hunk is rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace the whole content in one shot.
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
848 848
class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # A git "index" line marks a binary diff in these headers.
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                changed = sum(max(hk.added, hk.removed) for hk in self.hunks)
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        # Return the file(s) this header touches; one entry when both
        # sides name the same path.
        match = self.diffgit_re.match(self.header[0])
        if not match:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = match.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(line) for line in self.header)
920 920
921 921 class recordhunk(object):
922 922 """patch hunk
923 923
924 924 XXX shouldn't we merge this with the other hunk class?
925 925 """
926 926
927 927 def __init__(self, header, fromline, toline, proc, before, hunk, after,
928 928 maxcontext=None):
929 929 def trimcontext(lines, reverse=False):
930 930 if maxcontext is not None:
931 931 delta = len(lines) - maxcontext
932 932 if delta > 0:
933 933 if reverse:
934 934 return delta, lines[delta:]
935 935 else:
936 936 return delta, lines[:maxcontext]
937 937 return 0, lines
938 938
939 939 self.header = header
940 940 trimedbefore, self.before = trimcontext(before, True)
941 941 self.fromline = fromline + trimedbefore
942 942 self.toline = toline + trimedbefore
943 943 _trimedafter, self.after = trimcontext(after, False)
944 944 self.proc = proc
945 945 self.hunk = hunk
946 946 self.added, self.removed = self.countchanges(self.hunk)
947 947
948 948 def __eq__(self, v):
949 949 if not isinstance(v, recordhunk):
950 950 return False
951 951
952 952 return ((v.hunk == self.hunk) and
953 953 (v.proc == self.proc) and
954 954 (self.fromline == v.fromline) and
955 955 (self.header.files() == v.header.files()))
956 956
957 957 def __hash__(self):
958 958 return hash((tuple(self.hunk),
959 959 tuple(self.header.files()),
960 960 self.fromline,
961 961 self.proc))
962 962
963 963 def countchanges(self, hunk):
964 964 """hunk -> (n+,n-)"""
965 965 add = len([h for h in hunk if h.startswith('+')])
966 966 rem = len([h for h in hunk if h.startswith('-')])
967 967 return add, rem
968 968
969 969 def reversehunk(self):
970 970 """return another recordhunk which is the reverse of the hunk
971 971
972 972 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
973 973 that, swap fromline/toline and +/- signs while keep other things
974 974 unchanged.
975 975 """
976 976 m = {'+': '-', '-': '+', '\\': '\\'}
977 977 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
978 978 return recordhunk(self.header, self.toline, self.fromline, self.proc,
979 979 self.before, hunk, self.after)
980 980
981 981 def write(self, fp):
982 982 delta = len(self.before) + len(self.after)
983 983 if self.after and self.after[-1] == '\\ No newline at end of file\n':
984 984 delta -= 1
985 985 fromlen = delta + self.removed
986 986 tolen = delta + self.added
987 987 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
988 988 (self.fromline, fromlen, self.toline, tolen,
989 989 self.proc and (' ' + self.proc)))
990 990 fp.write(''.join(self.before + self.hunk + self.after))
991 991
992 992 pretty = write
993 993
994 994 def filename(self):
995 995 return self.header.filename()
996 996
    def __repr__(self):
        # terse debugging representation: patched file name and start line
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
999 999
def getmessages():
    """Return the table of i18n-ized prompt strings used by filterpatch().

    'multiple' and 'single' map an operation name ('apply', 'discard' or
    'record') to the per-hunk question; 'help' maps it to the
    '$$'-separated response string consumed by ui.promptchoice().
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1045 1045
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    'headers' is a list of header objects (each carrying its hunks);
    'operation' selects the wording of the prompts ('record' by default).
    Returns (list of selected headers/hunks, {}).
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # an earlier "all"/"done" or per-file answer short-circuits prompting
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff",
                                                      text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        # running line-offset correction for hunks skipped earlier in the file
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    # copy before mutating: the original hunk may be shared
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
               if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single parsed hunk from a unified or context diff.

    self.a holds the old-side lines (with '-'/' ' markers), self.b the
    new-side lines (without markers), self.hunk the raw hunk lines.
    """
    def __init__(self, desc, num, lr, context):
        # desc: the '@@ ...' (or context '***') description line
        # num: 1-based hunk number, used in error messages
        # lr: linereader to consume the hunk body from (None for a shell
        #     object that is filled in manually, see getnormalized())
        # context: True for context diffs, False/None for unified diffs
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from lr into a/b/hunk."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # '@@ -N +M @@' omits the lengths; they default to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk and rebuild it as a unified hunk."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-side block: '- ' removals and '! ' changes become '-' lines
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # no-eol marker: strip the trailing newline from the last line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new-side block: '+ ' and '! ' lines become '+' lines, merged into
        # self.hunk at the position matching the old-side context
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance hunki past matching/removed lines, or insert the new
            # line at the current merge position
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline ...' marker, if present
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides contain as many lines as the header announced
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to 'fuzz' context
        lines trimmed from the ends and the start lines adjusted."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1412 1412
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded file content, set by _read(); None until parsing succeeds
        self.text = None
        # True when the payload is a delta against the old content rather
        # than a full literal
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the new file content; 'lines' is the old content used as
        the delta base when self.delta is set."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, record it in the hunk, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length of the line:
            # 'A'..'Z' -> 1..26, otherwise 'a'.. -> 27..
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1468 1468
def parsefilename(str):
    """Extract the file name from a '--- name<TAB or space>junk' header."""
    # drop the '--- '/'+++ ' marker and the trailing newline
    s = str[4:].rstrip('\r\n')
    # the name ends at the first tab, or failing that the first space
    for sep in ('\t', ' '):
        idx = s.find(sep)
        if idx >= 0:
            return s[:idx]
    return s
1478 1478
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
    c
    1
    2
    @@ -2,6 +1,6 @@
    c
    1
    2
    - 3
    +4
    5
    d
    @@ -6,3 +5,2 @@
    5
    d
    -lastline

    '''

    # headers have no reversehunk() and pass through unchanged; hunks are
    # replaced by their reversed counterparts
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1541 1541
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # record the '@@ -from,+to @@ proc' boundaries of the next hunk
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush any pending hunk: 'context' becomes its trailing context
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context accumulated so far becomes this hunk's leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # allowed state transitions, mapping (current, next) to the handler
        # for the event's payload; anything else is a malformed patch
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1665 1665
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    pathlen = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # a run of consecutive slashes counts as one separator
        while pos < pathlen - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1703 1703
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta (target file name plus ADD/DELETE op) for a
    non-git hunk, deciding which of afile/bfile should be patched."""
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # creation/removal is signalled by /dev/null plus a 0,0 range
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer a side that actually exists in the working copy
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on whichever side is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1758 1758
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not ours: give the line back for the caller to re-read
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # context lines, possibly followed by a '\ No newline' marker
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1804 1804
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    start = 0
    try:
        # seekable input: remember the position so we can rewind afterwards
        start = lr.fp.tell()
        source = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer it all in memory instead
        source = stringio(lr.fp.read())
    scanner = linereader(source)
    scanner.push(firstline)
    gitpatches = readgitpatch(scanner)
    source.seek(start)
    return gitpatches
1830 1830
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None until we know the diff flavor, then True for context
    # diffs and False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk body follows for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries for files we have now passed
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git entries (e.g. pure renames)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1926 1926
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        # length of a varint size header: every byte but the last has its
        # high bit set
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    # skip the source-size and target-size varint headers
    binchunk = binchunk[deltahead(binchunk):]
    binchunk = binchunk[deltahead(binchunk):]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy command: the low bits of cmd select which little-endian
            # bytes of the offset (0x01..0x08) and size (0x10..0x40) follow
            offset = 0
            size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= ord(binchunk[i]) << shift
                    i += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= ord(binchunk[i]) << shift
                    i += 1
            if size == 0:
                size = 0x10000
            out += data[offset:offset + size]
        elif cmd != 0:
            # literal insert: cmd is the number of raw bytes that follow
            out += binchunk[i:i + cmd]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1982 1982
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it via _applydiff.

    Returns 0 when the patch applied cleanly, -1 when any hunk was
    rejected, and 1 when the patch applied but with fuzz.

    With eolmode='strict' both the patch and the patched files are
    handled in binary mode; for other modes line endings are ignored
    while patching and then normalized to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip,
                      prefix, eolmode)
1995 1995
1996 1996 def _canonprefix(repo, prefix):
1997 1997 if prefix:
1998 1998 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
1999 1999 if prefix != '':
2000 2000 prefix += '/'
2001 2001 return prefix
2002 2002
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Drive the iterhunks() parser state machine and apply each parsed
    # element through `patcher`/`backend`. Returns -1 if any hunk was
    # rejected, otherwise `err` (1 when a hunk applied with offset/fuzz,
    # 0 when clean).
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip path components and prepend the canonical prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file creation failed earlier: skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # starting a new file: flush rejects of the previous one
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (no hunks): handle it directly
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # count the whole file as rejected and keep going
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any hunk
            # can modify them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2087 2087
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    # run e.g. "patch -d <root> -p<strip> < <patchname>" and scan its output
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                # warn about a file at most once; reset for each new file
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        # record touched files even if parsing aborted mid-stream
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2129 2129
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    # Apply `patchobj` (a file path or file-like object) through `backend`.
    # `files` collects the set of touched paths. Raises PatchError when the
    # patch does not apply; returns True when it applied with fuzz.
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be a path (open it) or an already-open file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2156 2156
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2163 2163
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> against changectx *ctx* through a repobackend."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2168 2168
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an explicitly configured external patch command wins over the
    # builtin patcher
    externalcmd = ui.config('ui', 'patch')
    if externalcmd:
        return _externalpatch(ui, repo, externalcmd, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2190 2190
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    # Return the set of repo-relative paths touched by the patch at
    # `patchpath` (both sides of a rename), without applying anything.
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    # non-git patch: derive metadata from the file names
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2213 2213
class GitDiffRequired(Exception):
    """Raised when a plain diff would lose data; caught by diffhunks() to
    regenerate the diff in git extended format."""
    pass
2216 2216
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# historical alias kept for callers that still use the old name
diffopts = diffallopts
2223 2223
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # resolve an option: command line value wins over config, and
        # HGPLAIN can force a fixed value via `forceplain`
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2305 2305
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # cmdutil.getloglinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten all hunk lines into a single text chunk
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header only when there is content or extended header lines
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2352 2352
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (20 entries) of filelogs to avoid re-opening them
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2465 2465
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # NOTE: this SOURCE dump contained both the pre- and post-patch
    # versions of the _inlinediff consumption line; resolved here to the
    # post-patch `for t, l in ...` form so every yielded tuple is
    # consistently (text, label).
    inlinecolor = False
    if kw.get('opts'):
        inlinecolor = kw['opts'].worddiff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        matches = {}
        if inlinecolor:
            # pair up similar -/+ lines so their differences can be
            # highlighted word by word
            matches = _findmatches(lines)
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                if i in matches:
                                    for t, l in _inlinediff(
                                            lines[i].rstrip(),
                                            lines[matches[i]].rstrip(),
                                            label):
                                        yield (t, l)
                                else:
                                    yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                # emit the stripped trailing whitespace with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2532 2532
2533 2533 def _findmatches(slist):
2534 2534 '''Look for insertion matches to deletion and returns a dict of
2535 2535 correspondences.
2536 2536 '''
2537 2537 lastmatch = 0
2538 2538 matches = {}
2539 2539 for i, line in enumerate(slist):
2540 2540 if line == '':
2541 2541 continue
2542 2542 if line[0] == '-':
2543 2543 lastmatch = max(lastmatch, i)
2544 2544 newgroup = False
2545 2545 for j, newline in enumerate(slist[lastmatch + 1:]):
2546 2546 if newline == '':
2547 2547 continue
2548 2548 if newline[0] == '-' and newgroup: # too far, no match
2549 2549 break
2550 2550 if newline[0] == '+': # potential match
2551 2551 newgroup = True
2552 2552 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2553 2553 if sim > 0.7:
2554 2554 lastmatch = lastmatch + 1 + j
2555 2555 matches[i] = lastmatch
2556 2556 matches[lastmatch] = i
2557 2557 break
2558 2558 return matches
2559 2559
def _inlinediff(s1, s2, operation):
    '''Perform string diff to highlight specific changes.

    Returns a list of (token, label) pairs, matching the (output, label)
    order used everywhere else in difflabel(). (This SOURCE dump carried
    both the old (label, token) and new (token, label) append lines from
    the r35311 patch; resolved to the new ordering.)
    '''
    operation_skip = '+?' if operation == 'diff.deleted' else '-?'
    if operation == 'diff.deleted':
        # diff from old to new: for deletions, s1 is the '-' line
        s2, s1 = s1, s2

    buff = []
    # we never want to highlight the leading +-
    # NOTE(review): if neither branch below matches, `label`/`token` stay
    # unbound and the loop below would raise NameError — callers always
    # pass matched '+'/'-' lines, so this appears unreachable; confirm.
    if operation == 'diff.deleted' and s2.startswith('-'):
        label = operation
        token = '-'
        s2 = s2[1:]
        s1 = s1[1:]
    elif operation == 'diff.inserted' and s1.startswith('+'):
        label = operation
        token = '+'
        s2 = s2[1:]
        s1 = s1[1:]

    # word-level diff: split on non-word characters, keeping separators
    s = difflib.ndiff(re.split(br'(\W)', s2), re.split(br'(\W)', s1))
    for part in s:
        if part[0] in operation_skip:
            continue
        l = operation + '.highlight'
        if part[0] in ' ':
            l = operation
        if l == label: # contiguous token with same label
            token += part[2:]
            continue
        else:
            buff.append((token, label))
            label = l
            token = part[2:]
    buff.append((token, label))

    return buff
2596 2596
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)
2600 2600
2601 2601 def _filepairs(modified, added, removed, copy, opts):
2602 2602 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2603 2603 before and f2 is the the name after. For added files, f1 will be None,
2604 2604 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2605 2605 or 'rename' (the latter two only if opts.git is set).'''
2606 2606 gone = set()
2607 2607
2608 2608 copyto = dict([(v, k) for k, v in copy.items()])
2609 2609
2610 2610 addedset, removedset = set(added), set(removed)
2611 2611
2612 2612 for f in sorted(modified + added + removed):
2613 2613 copyop = None
2614 2614 f1, f2 = f, f
2615 2615 if f in addedset:
2616 2616 f1 = None
2617 2617 if f in copy:
2618 2618 if opts.git:
2619 2619 f1 = copy[f]
2620 2620 if f1 in removedset and f1 not in gone:
2621 2621 copyop = 'rename'
2622 2622 gone.add(f1)
2623 2623 else:
2624 2624 copyop = 'copy'
2625 2625 elif f in removedset:
2626 2626 f2 = None
2627 2627 if opts.git:
2628 2628 # have we already reported a copy above?
2629 2629 if (f in copyto and copyto[f] in addedset
2630 2630 and copy[copyto[f]] == f):
2631 2631 continue
2632 2632 yield f1, f2, copyop
2633 2633
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # sha1 of the blob, matching git's object hash ("blob <len>\0<data>")
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # git-style mode strings, keyed by mercurial flag ('l'ink, e'x'ec, plain)
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # a plain diff cannot represent these cases: let the caller
            # decide whether to upgrade to git format
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no    *   | summary   | no
        #  yes      | no   no       yes   *   | base85    | yes
        #  yes      | no   yes      no    *   | summary   | no
        #  yes      | no   yes      yes   0   | summary   | no
        #  yes      | no   yes      yes   >0  | summary   | semi [1]
        #  yes      | yes  *        *     *   | text diff | yes
        #  no       | *    *        *     *   | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2789 2789
def diffstatsum(stats):
    '''Aggregate per-file diffstat tuples into file-level totals.

    stats is an iterable of (filename, adds, removes, isbinary) tuples,
    as produced by diffstatdata(). Returns a 5-tuple of (widest filename
    display width, largest per-file change count, total additions, total
    removals, whether any file was binary).
    '''
    widest = 0
    biggest = 0
    added = 0
    removed = 0
    anybinary = False
    for fname, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(fname)
        if namewidth > widest:
            widest = namewidth
        changes = adds + removes
        if changes > biggest:
            biggest = changes
        added += adds
        removed += removes
        if isbinary:
            anybinary = True

    return widest, biggest, added, removed, anybinary
2800 2800
def diffstatdata(lines):
    '''Parse diff output into per-file change statistics.

    Returns a list of (filename, adds, removes, isbinary) tuples, one per
    file touched by the diff in *lines*. Handles both git-style
    ("diff --git a/... b/...") and hg-style ("diff -r ... -r ... filename")
    diff headers.
    '''
    # Use a raw string: '\s' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning since Python 3.6, an error in 3.12+).
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters for the file currently being processed, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker ends the header; +/- lines now count
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the final file's counters
    addresult()
    return results
2839 2839
def diffstat(lines, width=80):
    '''Render a diffstat histogram for the diff in *lines*.

    The result is at most *width* columns wide; the +/- graph is scaled
    down proportionally when the largest change count would not fit.
    Returns the whole histogram, including the trailing summary line, as
    a single string.
    '''
    filestats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = \
        diffstatsum(filestats)

    # reserve at least 3 columns for the count when 'Bin' may appear
    countwidth = max(len(str(maxtotal)), 3 if hasbinary else 0)
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    rows = []
    for filename, adds, removes, isbinary in filestats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        graph = '+' * scale(adds) + '-' * scale(removes)
        rows.append(' %s%s | %*s %s\n'
                    % (filename, padding, countwidth, count, graph))

    if filestats:
        rows.append(_(' %d files changed, %d insertions(+), '
                      '%d deletions(-)\n')
                    % (len(filestats), totaladds, totalremoves))

    return ''.join(rows)
2877 2877
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        # Only histogram rows end with a +/- graph; everything else
        # (e.g. the summary line) passes through unlabeled.
        if not line or line[-1] not in '+-':
            yield (line, '')
            continue
        name, graph = line.rsplit(' ', 1)
        yield (name + ' ', '')
        inserted = re.search(br'\++', graph)
        if inserted:
            yield (inserted.group(0), 'diffstat.inserted')
        deleted = re.search(br'-+', graph)
        if deleted:
            yield (deleted.group(0), 'diffstat.deleted')
    yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now