##// END OF EJS Templates
patch: fix a str + bytes issue in an exception handler...
Matt Harbison -
r44127:eab0b738 stable
parent child Browse files
Show More
@@ -1,3217 +1,3217 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import contextlib
13 13 import copy
14 14 import errno
15 15 import hashlib
16 16 import os
17 17 import re
18 18 import shutil
19 19 import zlib
20 20
21 21 from .i18n import _
22 22 from .node import (
23 23 hex,
24 24 short,
25 25 )
26 26 from .pycompat import open
27 27 from . import (
28 28 copies,
29 29 diffhelper,
30 30 diffutil,
31 31 encoding,
32 32 error,
33 33 mail,
34 34 mdiff,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 similar,
39 39 util,
40 40 vfs as vfsmod,
41 41 )
42 42 from .utils import (
43 43 dateutil,
44 44 procutil,
45 45 stringutil,
46 46 )
47 47
# Shorthand for util's byte-string file-like class.
stringio = util.stringio

# Matches the 'diff --git a/<old> b/<new>' line opening a git-style diff.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into alternating runs of tabs and non-tab text.
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# Splits a line into tab runs, space runs, word characters and single
# symbols; bytes >= 0x80 count as word characters so multi-byte encoded
# text stays grouped together.
wordsplitter = re.compile(
    br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
)

# Re-exported so existing callers can keep importing it from this module.
PatchError = error.PatchError
58 58 # public functions
59 59
60 60
def split(stream):
    '''return an iterator of individual patches from a stream

    The stream may hold a bare patch, a series of 'hg export' patches,
    an mbox of patches or a MIME message; the splitting strategy is
    chosen after peeking at the first lines.  Each yielded item is a
    file-like object (stringio) containing one patch.
    '''

    def isheader(line, inheader):
        # True when 'line' looks like an RFC 2822 header ("Key: value"
        # with no space in the key), or a continuation line while we are
        # already inside a header block.
        if inheader and line.startswith((b' ', b'\t')):
            # continuation
            return True
        if line.startswith((b' ', b'-', b'+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(b': ', 1)
        return len(l) == 2 and b' ' not in l[0]

    def chunk(lines):
        # Wrap accumulated lines into a file-like object.
        return stringio(b''.join(lines))

    def hgsplit(stream, cur):
        # 'hg export' style: a new patch starts at each
        # '# HG changeset patch' marker seen outside a header block.
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith(b'# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # mbox style: each 'From ' envelope line starts a new message;
        # the message body (cur[1:] drops the envelope line itself) is
        # recursively fed back through split().
        for line in stream:
            if line.startswith(b'From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # MIME message: parse the whole remaining input and yield every
        # text part that may contain a patch.
        def msgfp(m):
            # Flatten a message object back into a file-like object.
            fp = stringio()
            g = mail.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Plain header blocks: a new patch starts whenever a header line
        # appears while we are not already inside a header.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: the whole input is one patch.
        yield chunk(cur)

    class fiter(object):
        # Minimal iterator adapter for objects that only provide
        # readline() (e.g. http responses).
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    # headers that force full MIME parsing
    mimeheaders = [b'content-type']

    if not util.safehasattr(stream, b'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Read lines until the input kind can be identified, then hand the
    # stream (plus the lines already consumed) to the matching splitter.
    for line in stream:
        cur.append(line)
        if line.startswith(b'# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith(b'From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(b':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith(b'--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
190 190
191 191
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key"): a '# <header> <value>'
# line in an hg patch header is stored under <data key> in the dictionary
# returned by extract().
patchheadermap = [
    (b'Date', b'date'),
    (b'Branch', b'branch'),
    (b'Node ID', b'nodeid'),
]
199 199
200 200
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # Materialize the diff text into a temporary file; _extract() fills it
    # and records its path in the returned dictionary when a diff is found.
    handle, patchpath = pycompat.mkstemp(prefix=b'hg-patch-')
    patchfp = os.fdopen(handle, r'wb')
    try:
        yield _extract(ui, fileobj, patchpath, patchfp)
    finally:
        # This context manager owns the temporary file: always close and
        # remove it once the caller is done with the extracted data.
        patchfp.close()
        os.unlink(patchpath)
226 226
227 227
def _extract(ui, fileobj, tmpname, tmpfp):
    """Parse fileobj as a patch, possibly wrapped in an email message.

    The diff text is written to the already-open file object 'tmpfp'
    (whose path is 'tmpname') while commit metadata is collected into
    the returned dictionary (see extract() for the keys). 'filename' is
    set to tmpname only when at least one diff was found.
    """

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(
        br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
        br'---[ \t].*?^\+\+\+[ \t]|'
        br'\*\*\*[ \t].*?^---[ \t])',
        re.MULTILINE | re.DOTALL,
    )

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data[b'user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data[b'user']:
        # Not an email, restore parsed headers if any
        subject = (
            b'\n'.join(
                b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
            )
            + b'\n'
        )

    # should try to parse msg['Date']
    parents = []

    nodeid = msg[r'X-Mercurial-Node']
    if nodeid:
        data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
        ui.debug(b'Node ID: %s\n' % nodeid)

    if subject:
        if subject.startswith(b'[PATCH'):
            # drop a leading '[PATCH n/m]' style tag from the subject
            pend = subject.find(b']')
            if pend >= 0:
                subject = subject[pend + 1 :].lstrip()
        # fold continuation lines into a single-line subject
        subject = re.sub(br'\n[ \t]+', b' ', subject)
        ui.debug(b'Subject: %s\n' % subject)
    if data[b'user']:
        ui.debug(b'From: %s\n' % data[b'user'])
    diffs_seen = 0
    ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
    message = b''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug(b'Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            # This part contains a diff: the text before it is the commit
            # message, possibly carrying embedded 'hg export' metadata.
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug(b'found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[: m.start(0)].splitlines():
                if line.startswith(b'# HG changeset patch') and not hgpatch:
                    ui.debug(b'patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith(b'# User '):
                        data[b'user'] = line[7:]
                        ui.debug(b'From: %s\n' % data[b'user'])
                    elif line.startswith(b"# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith(b"# "):
                        # other known '# Header value' metadata lines
                        for header, key in patchheadermap:
                            prefix = b'# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix) :]
                                ui.debug(b'%s: %s\n' % (header, data[key]))
                    else:
                        hgpatchheader = False
                elif line == b'---':
                    # patch separator: what follows is not commit message
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write(b'\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith(b'\n'):
                    tmpfp.write(b'\n')
        elif not diffs_seen and message and content_type == b'text/plain':
            # plain text before the first diff extends the message
            message += b'\n' + payload

    if subject and not message.startswith(subject):
        message = b'%s\n%s' % (subject, message)
    data[b'message'] = message
    tmpfp.close()
    if parents:
        data[b'p1'] = parents.pop(0)
    if parents:
        data[b'p2'] = parents.pop(0)

    if diffs_seen:
        data[b'filename'] = tmpname

    return data
339 339
340 340
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY.  'path' is the patched file path.  'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise.  If
    the file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is true when the file is a symlink and 'isexec' is true
    when the file is executable.  Otherwise, 'mode' is None.
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = b'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Keep only the symlink and owner-exec bits of the git file mode.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # Duplicate all metadata into a fresh patchmeta instance.
        dup = patchmeta(self.path)
        for name in ('oldpath', 'mode', 'op', 'binary'):
            setattr(dup, name, getattr(self, name))
        return dup

    def _ispatchinga(self, afile):
        # Does 'afile' (the '--- ' side of a diff) refer to this file?
        if afile == b'/dev/null':
            return self.op == b'ADD'
        return afile == b'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # Does 'bfile' (the '+++ ' side of a diff) refer to this file?
        if bfile == b'/dev/null':
            return self.op == b'DELETE'
        return bfile == b'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return r"<patchmeta %s %r>" % (self.op, self.path)
387 387
388 388
def readgitpatch(lr):
    """extract git-style metadata about patches from the lines of 'lr'

    Returns a list of patchmeta objects, one per file touched.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(b' \r\n')
        if line.startswith(b'diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new per-file section begins: flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith(b'--- '):
                # '--- ' terminates the extended header for this file
                gitpatches.append(gp)
                gp = None
                continue
            # extended header lines describing operation and mode
            if line.startswith(b'rename from '):
                gp.op = b'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith(b'rename to '):
                gp.path = line[10:]
            elif line.startswith(b'copy from '):
                gp.op = b'COPY'
                gp.oldpath = line[10:]
            elif line.startswith(b'copy to '):
                gp.path = line[8:]
            elif line.startswith(b'deleted file'):
                gp.op = b'DELETE'
            elif line.startswith(b'new file mode '):
                gp.op = b'ADD'
                # last 6 characters are the octal file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
432 432
433 433
class linereader(object):
    """File-like wrapper allowing lines to be pushed back onto the input."""

    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # Queue 'line' to be returned by the next readline() call;
        # a None argument is silently ignored.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # Serve pushed-back lines first, then fall back to the stream.
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # Iterate until readline() yields the empty string (EOF).
        return iter(self.readline, b'')
453 453
454 454
class abstractbackend(object):
    """Abstract interface over the location patched files are read from
    and written to (filesystem, working directory, in-memory store...).
    """

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for
        this file.
        """
        # Intentionally a no-op by default: backends that cannot persist
        # reject files may simply ignore them.

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
488 488
489 489
class fsbackend(abstractbackend):
    """Backend reading and writing files directly under 'basedir' in the
    filesystem, through a vfs."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        # Symlinks are returned as their target path with the link flag set.
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file just leaves isexec False
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
        # missing/deleted file
        return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # mode-only change: leave the content untouched
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        # optionally prune directories left empty by the removal
        rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        # Save rejected hunks next to the patched file, as '<fname>.rej'.
        fname = fname + b".rej"
        self.ui.warn(
            _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
            % (failed, total, fname)
        )
        fp = self.opener(fname, b'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
540 540
541 541
class workingbackend(fsbackend):
    """fsbackend over the repository working directory that also keeps
    the dirstate in sync (copies, removes, renames) on close()."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to patch a file that exists but is unknown to the dirstate.
        if self.repo.dirstate[fname] == b'?' and self.exists(fname):
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # Flush the recorded copies/removals into the dirstate and return
        # the sorted list of all files touched by the patch.
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
585 585
586 586
class filestore(object):
    """Accumulate patched file contents, kept in memory up to 'maxsize'
    cumulative bytes and spilled to a temporary directory beyond that."""

    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        # default in-memory budget: 4 MiB
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        # A negative maxsize means "no limit": always keep data in memory.
        fits = self.maxsize < 0 or (len(data) + self.size) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        # Spill to disk, creating the temporary directory lazily.
        if self.opener is None:
            root = pycompat.mkdtemp(prefix=b'hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        diskname = b'%d' % self.created
        self.opener.write(diskname, data)
        self.created += 1
        self.files[fname] = (diskname, mode, copied)

    def getfile(self, fname):
        # In-memory entries win; otherwise read back from the spill area.
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        diskname, mode, copied = self.files[fname]
        return self.opener.read(diskname), mode, copied

    def close(self):
        # Remove the on-disk spill directory, if one was ever created.
        if self.opener:
            shutil.rmtree(self.opener.base)
623 623
624 624
class repobackend(abstractbackend):
    """Backend applying a patch against a changectx, storing results in
    the given filestore for later in-memory commit."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # Only files tracked in the base context may be patched.
        if fname not in self.ctx:
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        # Missing files are reported as (None, None) rather than raising.
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        islink = b'l' in flags
        isexec = b'x' in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: carry over the current file content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # Report every file touched by the patch.
        return self.changed | self.removed
666 666
667 667
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range markers: '--- start,end ----' / '*** start,end ****'
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# accepted end-of-line handling modes
eolmodes = [b'strict', b'crlf', b'lf', b'auto']
672 672
673 673
class patchfile(object):
    """State and logic for patching one target file.

    The original content is loaded from 'backend' (or from 'store' for
    copy/rename sources), hunks are applied in memory by apply(), and
    close() flushes the result and writes a .rej file for rejected
    hunks.  'eolmode' controls end-of-line normalization: one of
    'strict', 'crlf', 'lf' or 'auto' (reuse the input file's EOLs).
    """

    def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        # EOL style detected from the first line of the original file,
        # b'\r\n' or b'\n'; used when eolmode is 'auto'
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
        self.remove = gp.op == b'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith(b'\r\n'):
                    self.eol = b'\r\n'
                elif self.lines[0].endswith(b'\n'):
                    self.eol = b'\n'
                if eolmode != b'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith(b'\r\n'):
                            l = l[:-2] + b'\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(
                _(
                    b"(use '--prefix' to apply patch relative to the "
                    b"current directory)\n"
                )
            )

        # line content -> line numbers, built lazily for fuzzy matching
        self.hash = {}
        self.dirty = 0
        # cumulative line-count drift introduced by hunks applied so far
        self.offset = 0
        # extra drift found by searching; carried over between hunks
        self.skew = 0
        # rejected hunks, written out by write_rej()
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        # Convert EOLs back to the requested style before writing out.
        if self.eolmode == b'auto':
            eol = self.eol
        elif self.eolmode == b'crlf':
            eol = b'\r\n'
        else:
            eol = b'\n'

        if self.eolmode != b'strict' and eol and eol != b'\n':
            rawlines = []
            for l in lines:
                if l and l.endswith(b'\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, b''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # Emit the "patching file ..." banner at most once per file.
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _(b"patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = [b"--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != b'\n':
                    lines.append(b"\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk 'h'.

        Returns 0 on a clean apply, the fuzz amount when the hunk
        applied with fuzz, or -1 when it was rejected.
        """
        if not h.complete():
            raise PatchError(
                _(b"bad hunk #%d %s (%d %d %d %d)")
                % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
            )

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(
                    _(b"cannot create %s: destination already exists\n")
                    % self.fname
                )
            else:
                self.ui.warn(_(b"file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content, no fuzzing
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (
            self.eolmode in (b'crlf', b'lf')
            or self.eolmode == b'auto'
            and self.eol
        ):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart : oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(
            self.ui.configint(b"patch", b"fuzz") + 1
        ):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"with fuzz %d "
                                b"(offset %d lines).\n"
                            )
                            self.printfile(True)
                            self.ui.warn(
                                msg % (h.number, l + 1, fuzzlen, offset)
                            )
                        else:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"(offset %d lines).\n"
                            )
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_(b"Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # Flush the patched content (if any change applied), write the
        # reject file and report the number of rejected hunks.
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
905 905
906 906
class header(object):
    """patch header

    Wraps the header lines of one file's diff, with helpers to classify
    the change (binary, new file, special) and extract the file names.
    """

    diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$')
    diff_re = re.compile(b'diff -r .* (.*)$')
    allhunks_re = re.compile(b'(?:index|deleted file) ')
    pretty_re = re.compile(b'(?:new file|deleted file) ')
    special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ')
    newfile_re = re.compile(b'(?:new file|copy to|rename to)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # git binary diffs carry an 'index ' line in their header
        return any(h.startswith(b'index ') for h in self.header)

    def pretty(self, fp):
        # Write a human-readable summary of this header to fp, stopping
        # at the first line that identifies the kind of change.
        for h in self.header:
            if h.startswith(b'index '):
                fp.write(_(b'this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_(b'this is a binary file\n'))
                break
            if h.startswith(b'---'):
                fp.write(
                    _(b'%d hunks, %d lines changed\n')
                    % (
                        len(self.hunks),
                        sum([max(h.added, h.removed) for h in self.hunks]),
                    )
                )
                break
            fp.write(h)

    def write(self, fp):
        fp.write(b''.join(self.header))

    def allhunks(self):
        # True when the file must be taken or rejected as a whole
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        # Return [path] for in-place changes, [from, to] for copies and
        # renames (based on the first header line).
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return b'<header %s>' % (b' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or any(
            self.special_re.match(h) for h in self.header
        )
984 984
985 985
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(
        self,
        header,
        fromline,
        toline,
        proc,
        before,
        hunk,
        after,
        maxcontext=None,
    ):
        def trimcontext(lines, reverse=False):
            # Cap context at maxcontext lines, dropping from the start
            # (reverse=True) or from the end, and report how many were cut.
            if maxcontext is not None:
                excess = len(lines) - maxcontext
                if excess > 0:
                    if reverse:
                        return excess, lines[excess:]
                    return excess, lines[:maxcontext]
            return 0, lines

        self.header = header
        skippedbefore, self.before = trimcontext(before, True)
        # shift the ranges past any context trimmed from the top
        self.fromline = fromline + skippedbefore
        self.toline = toline + skippedbefore
        _skippedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (
            self.hunk == v.hunk
            and self.proc == v.proc
            and self.fromline == v.fromline
            and self.header.files() == v.header.files()
        )

    def __hash__(self):
        return hash(
            (
                tuple(self.hunk),
                tuple(self.header.files()),
                self.fromline,
                self.proc,
            )
        )

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line.startswith(b'+'))
        removed = sum(1 for line in hunk if line.startswith(b'-'))
        return added, removed

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
        flipped = [flip[l[0:1]] + l[1:] for l in self.hunk]
        return recordhunk(
            self.header,
            self.toline,
            self.fromline,
            self.proc,
            self.before,
            flipped,
            self.after,
        )

    def write(self, fp):
        # Context lines count toward both ranges; the trailing
        # '\ No newline at end of file' marker does not.
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == b'\\ No newline at end of file\n':
            delta -= 1
        fp.write(
            b'@@ -%d,%d +%d,%d @@%s\n'
            % (
                self.fromline,
                delta + self.removed,
                self.toline,
                delta + self.added,
                self.proc and (b' ' + self.proc),
            )
        )
        fp.write(b''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return b'<hunk %r@%d>' % (self.filename(), self.fromline)
1093 1093
1094 1094
def getmessages():
    """Return the table of prompt strings used by filterpatch().

    The table is keyed first by prompt flavor (b'multiple'/b'single' for
    per-change questions, b'help' for the ui.promptchoice() response
    string) and then by operation name (b'apply', b'discard', b'keep',
    b'record').  The literals are kept inline so gettext extraction sees
    them.
    """
    return {
        b'multiple': {
            b'apply': _(b"apply change %d/%d to '%s'?"),
            b'discard': _(b"discard change %d/%d to '%s'?"),
            b'keep': _(b"keep change %d/%d to '%s'?"),
            b'record': _(b"record change %d/%d to '%s'?"),
        },
        b'single': {
            b'apply': _(b"apply this change to '%s'?"),
            b'discard': _(b"discard this change to '%s'?"),
            b'keep': _(b"keep this change to '%s'?"),
            b'record': _(b"record this change to '%s'?"),
        },
        b'help': {
            b'apply': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, apply this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Apply remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Apply &all changes to all remaining files'
                b'$$ &Quit, applying no changes'
                b'$$ &? (display help)'
            ),
            b'discard': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, discard this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Discard remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Discard &all changes to all remaining files'
                b'$$ &Quit, discarding no changes'
                b'$$ &? (display help)'
            ),
            b'keep': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, keep this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Keep remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Keep &all changes to all remaining files'
                b'$$ &Quit, keeping all changes'
                b'$$ &? (display help)'
            ),
            b'record': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, record this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Record remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Record &all changes to all remaining files'
                b'$$ &Quit, recording no changes'
                b'$$ &? (display help)'
            ),
        },
    }
1160 1160
1161 1161
def filterpatch(ui, headers, match, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Returns a pair (appliedchunks, {}): a flat [header, hunk, ...] list of
    the changes the user accepted (headers with no selected hunk and no
    special status are dropped), and a (currently always empty) dict.
    """
    messages = getmessages()

    if operation is None:
        operation = b'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # earlier 'f'/'s'/'a'/'d' answers short-circuit further prompting
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages[b'help'][operation]
            # IMPORTANT: keep the last line of this prompt short (<40 english
            # chars is a good target) because of issue6158.
            r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
            ui.write(b"\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                # whole-file and binary prompts pass chunk=None / binary
                # headers, which cannot be round-tripped through an editor
                if chunk is None:
                    ui.write(_(b'cannot edit patch for whole file'))
                    ui.write(b"\n")
                    continue
                if chunk.header.binary():
                    ui.write(_(b'cannot edit patch for binary file'))
                    ui.write(b"\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = b'---' + _(
                    """
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
"""
                )
                (patchfd, patchfn) = pycompat.mkstemp(
                    prefix=b"hg-editor-", suffix=b".diff"
                )
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write(
                        b''.join(
                            [b'# ' + i + b'\n' for i in phelp.splitlines()]
                        )
                    )
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system(
                        b"%s \"%s\"" % (editor, patchfn),
                        environ={b'HGUSER': ui.username()},
                        blockedtag=b'filterpatch',
                    )
                    if ret != 0:
                        ui.warn(_(b"editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith(b'#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_(b'user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        # fixoffset tracks how much later hunks' target lines shift when an
        # earlier hunk of the same file is skipped
        fixoffset = 0
        hdr = b''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        files = h.files()
        msg = _(b'examine changes to %s?') % _(b' and ').join(
            b"'%s'" % f for f in files
        )
        # files named explicitly on the command line are examined without
        # prompting
        if all(match.exact(f) for f in files):
            r, skipall, np = True, None, None
        else:
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages[b'single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages[b'multiple'][operation] % (
                    idx,
                    total,
                    chunk.filename(),
                )
            r, skipfile, skipall, newpatches = prompt(
                skipfile, skipall, msg, chunk
            )
            if r:
                if fixoffset:
                    # copy before mutating: the original hunk may be shared
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: apply the edited version instead
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (
        sum(
            [
                h
                for h in pycompat.itervalues(applied)
                if h[0].special() or len(h) > 1
            ],
            [],
        ),
        {},
    )
1342 1342
1343 1343
class hunk(object):
    """A single text hunk parsed from a unified or context format diff.

    'a' holds the old-side lines and 'b' the new-side lines; 'hunk' keeps
    the raw hunk lines, starting with the '@@' descriptor.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the hunk descriptor line; num: 1-based hunk number used in
        # error messages; lr: linereader to consume the hunk body from, or
        # None to build an empty shell (see getnormalized); context: True
        # for context-diff format, falsy for unified format.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith(b'\r\n'):
                    line = line[:-2] + b'\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format ('@@') hunk body from 'lr'."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # lengths are optional in the descriptor and default to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(
                lr, self.hunk, self.lena, self.lenb, self.a, self.b
            )
        except error.ParseError as e:
            # forcebytestr: a ParseError cannot be %s-formatted into bytes
            # directly on Python 3 (same idiom as binhunk._read)
            raise PatchError(
                _(b"bad hunk #%d: %s")
                % (self.number, stringutil.forcebytestr(e))
            )
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format ('***') hunk body from 'lr'.

        The hunk is rewritten into unified form: 'desc' and 'hunk[0]' are
        replaced with an equivalent '@@' descriptor at the end.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: '- ' removals and '! ' changes become '-', '  ' stays
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith(b'---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'- ') or l.startswith(b'! '):
                u = b'-' + s
            elif l.startswith(b' '):
                u = b' ' + s
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # no-eol marker: strip the newline from the last old line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new side: '+ ' additions and '! ' changes become '+', merged into
        # 'hunk' at the right position relative to the old-side lines
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'+ ') or l.startswith(b'! '):
                u = b'+' + s
            elif l.startswith(b' '):
                u = b' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = b""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith(b'-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith(b'-') or x.startswith(b' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith(b'+') or x.startswith(b' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
            self.starta,
            self.lena,
            self.startb,
            self.lenb,
        )
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when as many lines as the descriptor announced were read."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(b' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(b' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top : len(old) - bot], new[top : len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to 'fuzz' context
        lines trimmed, only from the top when 'toponly' is set."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1570 1570
1571 1571
class binhunk(object):
    """A binary patch file.

    Decodes a git 'literal'/'delta' binary hunk (length-prefixed base85
    lines wrapped around a zlib-compressed payload).
    """
    # Note: this docstring was previously the bytes literal
    # b'A binary patch file.', which on Python 3 is a plain expression
    # statement, leaving __doc__ unset.

    def __init__(self, lr, fname):
        # 'lr' is a linereader positioned just after the 'GIT binary patch'
        # marker line; 'fname' is only used in error messages.
        self.text = None  # decoded payload; set by _read() on success
        self.delta = False  # True for 'delta' hunks, False for 'literal'
        self.hunk = [b'GIT binary patch\n']  # raw hunk lines as consumed
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the payload was decoded and length-checked."""
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list.

        'lines' is the old content; it is only consulted for delta hunks,
        which apply against it.
        """
        if self.delta:
            return [applybindelta(self.text, b''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, remember it in 'hunk', return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip(b'\r\n')

        # scan forward to the 'literal <size>' or 'delta <size>' header;
        # EOF before finding one means the data is missing
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(
                    _(b'could not extract "%s" binary data') % self._fname
                )
            if line.startswith(b'literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith(b'delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        # each data line starts with one length character (A-Z encode 1-26
        # payload bytes, a-z encode 27-52), followed by base85 data; a line
        # of length <= 1 terminates the hunk
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0:1]
            if l <= b'Z' and l >= b'A':
                l = ord(l) - ord(b'A') + 1
            else:
                l = ord(l) - ord(b'a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(
                    _(b'could not decode "%s" binary patch: %s')
                    % (self._fname, stringutil.forcebytestr(e))
                )
            line = getline(lr, self.hunk)
        text = zlib.decompress(b''.join(dec))
        if len(text) != size:
            raise PatchError(
                _(b'"%s" length is %d bytes, should be %d')
                % (self._fname, len(text), size)
            )
        self.text = text
1632 1632
1633 1633
def parsefilename(str):
    """Extract the file name from a '--- name<TAB|SPACE>stuff' patch line.

    The leading four characters ('--- ' or '+++ ') are dropped; the name
    ends at the first tab if present, otherwise at the first space,
    otherwise at the end of the (CR/LF-stripped) line.
    """
    trimmed = str[4:].rstrip(b'\r\n')
    # a tab separator takes precedence over a space separator
    for sep in (b'\t', b' '):
        cut = trimmed.find(sep)
        if cut >= 0:
            return trimmed[:cut]
    return trimmed
1643 1643
1644 1644
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
    c
    1
    2
    @@ -2,6 +1,6 @@
    c
    1
    2
    - 3
    +4
    5
    d
    @@ -6,3 +5,2 @@
    5
    d
    -lastline

    '''

    newhunks = []
    for c in hunks:
        # headers have no reversehunk() and pass through unchanged; hunks
        # are replaced by their reversed counterpart
        if util.safehasattr(c, b'reversehunk'):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks
1707 1707
1708 1708
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """

    class parser(object):
        """patch parsing state machine"""

        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = b''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # a new '@@' range: flush any pending hunk, then record the
            # new start lines and section name
            self.addcontext([])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # 'context' closes the pending hunk (if any): the accumulated
            # before/hunk lines become a recordhunk, and 'context' becomes
            # the candidate 'before' of the next hunk
            if self.hunk:
                h = recordhunk(
                    self.header,
                    self.fromline,
                    self.toline,
                    self.proc,
                    self.before,
                    self.hunk,
                    context,
                    maxcontext,
                )
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            if self.hunk:
                # two hunk events with no separating context
                self.addcontext([])
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # state -> {event -> action}; a missing entry is a parse error
        transitions = {
            b'file': {
                b'context': addcontext,
                b'file': newfile,
                b'hunk': addhunk,
                b'range': addrange,
            },
            b'context': {
                b'file': newfile,
                b'hunk': addhunk,
                b'range': addrange,
                b'other': addother,
            },
            b'hunk': {
                b'context': addcontext,
                b'file': newfile,
                b'range': addrange,
            },
            b'range': {b'context': addcontext, b'hunk': addhunk},
            b'other': {b'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(b''.join(originalchunks))
    fp.seek(0)

    state = b'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError(
                b'unhandled transition: %s -> %s' % (state, newstate)
            )
        state = newstate
    del fp
    return p.finished()
1851 1851
1852 1852
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return b'', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find(b'/', pos)
        if pos == -1:
            raise PatchError(
                _(b"unable to strip away %d of %d dirs from %s")
                % (remaining, strip, path)
            )
        pos += 1
        # a run of '/' separators ('a//b') counts as one component boundary
        while pos < end - 1 and path[pos : pos + 1] == b'/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1892 1892
1893 1893
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a non-git hunk, deciding which file to patch.

    afile_orig/bfile_orig are the raw '---'/'+++' names from the patch;
    'strip' and 'prefix' are applied via pathtransform().  The target is
    chosen between afile and bfile based on which exists in 'backend'.
    The result is marked ADD when patching from /dev/null with an empty
    source range, and DELETE in the symmetric case.

    Raises PatchError when neither a source nor a destination file can be
    determined.
    """
    nulla = afile_orig == b"/dev/null"
    nullb = bfile_orig == b"/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[: afile.rfind(b'/') + 1]
    bbasedir = bfile[: bfile.rfind(b'/') + 1]
    if (
        missing
        and abasedir == bbasedir
        and afile.startswith(bfile)
        and hunk.starta == 0
        and hunk.lena == 0
    ):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = abase == bbase and bfile.startswith(afile)
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # neither side exists yet: fall back to whichever name is not
        # /dev/null (preferring the destination)
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_(b"undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = b'ADD'
    elif remove:
        gp.op = b'DELETE'
    return gp
1953 1953
1954 1954
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, b''):
            if p(line):
                lines.append(line)
            else:
                # first non-matching line is pushed back for the caller
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, b''):
        if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):

            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in (b'---', b'diff')

            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith(b'---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                # no '---'/'+++' pair (e.g. a git metadata-only diff)
                lr.push(fromfile)
            yield b'file', header
        elif line.startswith(b' '):
            cs = (b' ', b'\\')
            yield b'context', scanwhile(line, lambda l: l.startswith(cs))
        elif line.startswith((b'-', b'+')):
            cs = (b'-', b'+', b'\\')
            yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
        else:
            m = lines_re.match(line)
            if m:
                yield b'range', m.groups()
            else:
                yield b'other', line
2004 2004
2005 2005
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    restorepos = 0
    try:
        # seekable stream: remember the position so we can rescan in place
        restorepos = lr.fp.tell()
        stream = lr.fp
    except IOError:
        # unseekable stream: slurp the rest into an in-memory buffer
        stream = stringio(lr.fp.read())
    scanner = linereader(stream)
    scanner.push(firstline)
    gitpatches = readgitpatch(scanner)
    stream.seek(restorepos)
    return gitpatches
2031 2031
2032 2032
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = b""
    bfile = b""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until the diff flavor is known, then True for a
    # context diff and False for a unified diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, b''):
        if state == BFILE and (
            (not context and x.startswith(b'@'))
            or (context is not False and x.startswith(b'***************'))
            or x.startswith(b'GIT binary patch')
        ):
            # a hunk for the currently selected file
            gp = None
            if gitpatches and gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
            if x.startswith(b'GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith(b'***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new file: announce the file first
                emitfile = False
                yield b'file', (afile, bfile, h, gp and gp.copy() or None)
            yield b'hunk', h
        elif x.startswith(b'diff --git a/'):
            m = gitre.match(x.rstrip(b' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield b'git', [
                    g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
                ]
                gitpatches.reverse()
            afile = b'a/' + m.group(1)
            bfile = b'b/' + m.group(2)
            # flush hunkless metadata-only entries preceding this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield b'file', (
                    b'a/' + gp.path,
                    b'b/' + gp.path,
                    None,
                    gp.copy(),
                )
            if not gitpatches:
                raise PatchError(
                    _(b'failed to synchronize metadata for "%s"') % afile[2:]
                )
            newfile = True
        elif x.startswith(b'---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith(b'+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith(b'***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith(b'---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith(b"***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # defer the 'file' event until the first hunk arrives
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any trailing hunkless git entries (e.g. mode-only changes)
    while gitpatches:
        gp = gitpatches.pop()
        yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2134 2134
2135 2135
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    ``binchunk`` is the delta stream (two varint length headers followed
    by copy/insert opcodes) and ``data`` is the source the delta was
    computed against.  Returns the reconstructed target as bytes.
    Raises PatchError on opcode 0, which git reserves as invalid.
    """

    def deltahead(binchunk):
        """Return the byte length of one base-128 varint header."""
        i = 0
        while i < len(binchunk):
            i += 1
            # the high bit marks a continuation byte; the varint ends on
            # the first byte without it (same byte-slicing idiom as the
            # opcode loop below, so no pycompat helper is needed)
            if not (ord(binchunk[i - 1 : i]) & 0x80):
                break
        return i

    # skip the source-length and target-length headers
    binchunk = binchunk[deltahead(binchunk) :]
    binchunk = binchunk[deltahead(binchunk) :]
    # collect fragments and join once: repeated bytes += is quadratic
    out = []
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i : i + 1])
        i += 1
        if cmd & 0x80:
            # copy opcode: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            if cmd & 0x01:
                offset = ord(binchunk[i : i + 1])
                i += 1
            if cmd & 0x02:
                offset |= ord(binchunk[i : i + 1]) << 8
                i += 1
            if cmd & 0x04:
                offset |= ord(binchunk[i : i + 1]) << 16
                i += 1
            if cmd & 0x08:
                offset |= ord(binchunk[i : i + 1]) << 24
                i += 1
            if cmd & 0x10:
                size = ord(binchunk[i : i + 1])
                i += 1
            if cmd & 0x20:
                size |= ord(binchunk[i : i + 1]) << 8
                i += 1
            if cmd & 0x40:
                size |= ord(binchunk[i : i + 1]) << 16
                i += 1
            if size == 0:
                # git encodes a copy size of 0x10000 as zero
                size = 0x10000
            out.append(data[offset : offset + size])
        elif cmd != 0:
            # insert opcode: the next cmd bytes are literal data
            out.append(binchunk[i : i + cmd])
            i += cmd
        else:
            raise PatchError(_(b'unexpected delta opcode 0'))
    return b''.join(out)
2193 2193
2194 2194
def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
    """Read a patch from ``fp`` and try to apply it via ``backend``.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With ``eolmode`` set to 'strict', the patch content and patched file
    are read in binary mode; any other mode ignores line endings while
    patching and normalizes them afterwards.
    """
    return _applydiff(
        ui,
        fp,
        patchfile,
        backend,
        store,
        strip=strip,
        prefix=prefix,
        eolmode=eolmode,
    )
2215 2215
2216 2216
def _canonprefix(repo, prefix):
    """Canonicalize ``prefix`` against the repo root, ensuring any
    non-empty result ends with a slash."""
    if not prefix:
        return prefix
    canon = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
    if canon != b'':
        canon += b'/'
    return canon
2223 2223
2224 2224
def _applydiff(
    ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
):
    """Worker for applydiff(): drive iterhunks() events through ``patcher``.

    Returns 0 on a clean apply, 1 if any hunk applied with fuzz, and -1
    if any hunk was rejected.
    """
    prefix = _canonprefix(backend.repo, prefix)

    def pstrip(p):
        # strip leading path components and prepend the canonical prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == b'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == b'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(
                    backend, afile, bfile, first_hunk, strip, prefix
                )
            if gp.op == b'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: create/delete/copy without hunks
                if gp.op == b'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in (b'RENAME', b'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(
                            _(b"source file '%s' does not exist") % gp.oldpath
                        )
                if gp.mode:
                    mode = gp.mode
                    if gp.op == b'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = b''
                if data or mode:
                    if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
                        gp.path
                    ):
                        raise PatchError(
                            _(
                                b"cannot create %s: destination "
                                b"already exists"
                            )
                            % gp.path
                        )
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
            except PatchError as inst:
                # bug fix: str(inst) + b'\n' mixed str and bytes and
                # raised TypeError on Python 3; convert the exception
                # message to bytes before concatenating
                ui.warn(stringutil.forcebytestr(inst) + b'\n')
                current_file = None
                rejects += 1
                continue
        elif state == b'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_(b'unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2318 2318
2319 2319
def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    ``files`` is updated in place with every path the external tool
    reports as patched.  Raises PatchError if the tool exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append(b'-d %s' % procutil.shellquote(cwd))
    cmd = b'%s %s -p%d < %s' % (
        patcher,
        b' '.join(args),
        strip,
        procutil.shellquote(patchname),
    )
    ui.debug(b'Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, b'rb')
    try:
        # scrape the external tool's stdout for per-file status
        # NOTE(review): pf/printed_file are only bound once a
        # 'patching file' line has been seen; a fuzz/FAILED line
        # appearing first would raise NameError — presumably the tool
        # always emits the file line first, but confirm.
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + b'\n')
            if line.startswith(b'patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find(b'with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + b'\n')
                    printed_file = True
                ui.warn(line + b'\n')
            elif line.find(b'saving rejects to file') >= 0:
                ui.warn(line + b'\n')
            elif line.find(b'FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + b'\n')
                    printed_file = True
                ui.warn(line + b'\n')
    finally:
        # always record touched files before judging the exit code
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(
                _(b"patch command failed: %s") % procutil.explainexit(code)
            )
    return fuzz
2367 2367
2368 2368
def patchbackend(
    ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
):
    """Apply ``patchobj`` (a path or an open file object) via ``backend``.

    Returns True when the patch applied with fuzz, False when it
    applied cleanly; raises PatchError when any hunk was rejected.
    ``files`` is updated in place with the set of touched paths.
    """
    files = set() if files is None else files
    if eolmode is None:
        eolmode = ui.config(b'patch', b'eol')
    eol = eolmode.lower()
    if eol not in eolmodes:
        raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)

    store = filestore()
    try:
        fp = open(patchobj, b'rb')
    except TypeError:
        # not a path: assume an already-open file-like object
        fp = patchobj
    try:
        ret = applydiff(
            ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eol
        )
    finally:
        # only close the file we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_(b'patch failed to apply'))
    return ret > 0
2397 2397
2398 2398
def internalpatch(
    ui,
    repo,
    patchobj,
    strip,
    prefix=b'',
    files=None,
    eolmode=b'strict',
    similarity=0,
):
    """Apply ``patchobj`` to the working directory with the builtin
    patch code.  Returns whether the patch applied with fuzz."""
    wb = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wb, patchobj, strip, prefix, files, eolmode)
2413 2413
2414 2414
def patchrepo(
    ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
):
    """Apply ``patchobj`` on top of ``ctx``, writing results into ``store``."""
    rb = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rb, patchobj, strip, prefix, files, eolmode)
2420 2420
2421 2421
def patch(
    ui,
    repo,
    patchname,
    strip=1,
    prefix=b'',
    files=None,
    eolmode=b'strict',
    similarity=0,
):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    externaltool = ui.config(b'ui', b'patch')
    if externaltool:
        # user configured an external patch program; eolmode is ignored
        return _externalpatch(
            ui, repo, externaltool, patchname, strip, files, similarity
        )
    return internalpatch(
        ui, repo, patchname, strip, prefix, files, eolmode, similarity
    )
2453 2453
2454 2454
def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
    """Return the set of file paths touched by the patch at ``patchpath``."""
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    pathstrip = strip - 1
    with open(patchpath, b'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state in (b'hunk', b'git'):
                # hunk bodies and git metadata introduce no new paths
                continue
            if state != b'file':
                raise error.Abort(_(b'unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pathtransform(gp.path, pathstrip, prefix)[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, pathstrip, prefix)[1]
            else:
                gp = makepatchmeta(
                    backend, afile, bfile, first_hunk, strip, prefix
                )
            changed.add(gp.path)
            if gp.op == b'RENAME':
                changed.add(gp.oldpath)
        return changed
2479 2479
2480 2480
class GitDiffRequired(Exception):
    """Internal signal raised when a plain diff would lose data and the
    output must be upgraded to git's extended diff format."""

    pass
2483 2483
2484 2484
# re-exported aliases: the implementations live in diffutil
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2488 2488
2489 2489
def diff(
    repo,
    node1=None,
    node2=None,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    pathfn=None,
    copy=None,
    copysourcematch=None,
    hunksfilterfn=None,
):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    pathfn is applied to every path in the diff output.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    if copysourcematch is not None, then copy sources will be filtered
    by this matcher.

    hunksfilterfn, if not None, should be a function taking a filectx
    and hunks generator that may yield filtered hunks.
    '''
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    ctx1, ctx2 = repo[node1], repo[node2]

    hunkiter = diffhunks(
        repo,
        ctx1=ctx1,
        ctx2=ctx2,
        match=match,
        changes=changes,
        opts=opts,
        losedatafn=losedatafn,
        pathfn=pathfn,
        copy=copy,
        copysourcematch=copysourcematch,
    )
    for fctx1, fctx2, hdr, hunks in hunkiter:
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert (
                fctx2 is not None
            ), b'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        text = b''.join(b''.join(hlines) for hrange, hlines in hunks)
        if hdr and (text or len(hdr) > 1):
            yield b'\n'.join(hdr) + b'\n'
        if text:
            yield text
2563 2563
2564 2564
def diffhunks(
    repo,
    ctx1,
    ctx2,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    pathfn=None,
    copy=None,
    copysourcematch=None,
):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    def lrugetfilectx():
        # small (20-entry) LRU cache of filelogs, keyed by path, to
        # avoid re-opening the same filelog for repeated lookups
        cache = {}
        order = collections.deque()

        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx

        return getfilectx

    getfilectx = lrugetfilectx()

    if not changes:
        changes = ctx1.status(ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        # copy information is only needed for git diffs or when deciding
        # whether output must be upgraded to git format
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if copysourcematch:
        # filter out copies where source side isn't inside the matcher
        # (copies.pathcopies() already filtered out the destination)
        copy = {
            dst: src
            for dst, src in pycompat.iteritems(copy)
            if copysourcematch(src)
        }

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    # warm any remote/lazy file caches before generating the diff
    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset)
    )
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(
            repo,
            revs,
            ctx1,
            ctx2,
            modified,
            added,
            removed,
            copy,
            getfilectx,
            opts,
            losedata,
            pathfn,
        )

    if opts.upgrade and not opts.git:
        # try plain format first; losedata() raises GitDiffRequired if
        # anything would be lost, and we then regenerate in git format
        try:

            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired

            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2690 2690
2691 2691
def diffsinglehunk(hunklines):
    """yield (text, label) tokens for the lines of a single hunk"""
    for line in hunklines:
        if line.startswith(b'-'):
            label = b'diff.deleted'
        elif line.startswith(b'+'):
            label = b'diff.inserted'
        else:
            raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
        # peel off the EOL, then any trailing whitespace before it
        eolless = line.rstrip(b'\r\n')
        body = eolless.rstrip()
        # tabs get their own label so they can be highlighted
        for token in tabsplitter.findall(body):
            if token.startswith(b'\t'):
                yield (token, b'diff.tab')
            else:
                yield (token, label)

        if body != eolless:
            yield (eolless[len(body) :], b'diff.trailingwhitespace')
        if eolless != line:
            yield (line[len(eolless) :], b'')
2715 2715
2716 2716
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors"""
    # prepare deleted, and inserted content
    a = b''
    b = b''
    for line in hunklines:
        if line[0:1] == b'-':
            a += line[1:]
        elif line[0:1] == b'+':
            b += line[1:]
        else:
            raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    aln = [s if s == b'\n' else s + b'\n' for s in al]
    bln = [s if s == b'\n' else s + b'\n' for s in bl]
    an = b''.join(aln)
    bn = b''.join(bln)
    # run the diff algorithm, prepare atokens and btokens
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        # btype b'!' marks a changed block; anything else is unchanged
        changed = btype == b'!'
        for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [
        (b'-', b'diff.deleted', atokens),
        (b'+', b'diff.inserted', btokens),
    ]:
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                # emit the '-'/'+' marker at the start of each output line
                yield (prefix, label)
                nextisnewline = False
            # special handling line end
            isendofline = token.endswith(b'\n')
            if isendofline:
                chomp = token[:-1]  # chomp
                if chomp.endswith(b'\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp) :]
                token = chomp.rstrip()  # detect spaces at the end
                endspaces = chomp[len(token) :]
            # scan tabs
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = b'diff.tab'
                else:
                    if changed:
                        currentlabel = label + b'.changed'
                    else:
                        currentlabel = label + b'.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, b'diff.trailingwhitespace')
                yield (endofline, b'')
                nextisnewline = True
2787 2787
2788 2788
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # choose word-level or line-level hunk colorization
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [
        (b'diff', b'diff.diffline'),
        (b'copy', b'diff.extended'),
        (b'rename', b'diff.extended'),
        (b'old', b'diff.extended'),
        (b'new', b'diff.extended'),
        (b'deleted', b'diff.extended'),
        (b'index', b'diff.extended'),
        (b'similarity', b'diff.extended'),
        (b'---', b'diff.file_a'),
        (b'+++', b'diff.file_b'),
    ]
    textprefixes = [
        (b'@', b'diff.hunk'),
        # - and + are handled by diffsinglehunk
    ]
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []

    def consumehunkbuffer():
        # flush the buffered hunk lines through the chosen colorizer
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split(b'\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            # track whether we are inside a file header section
            if head:
                if line.startswith(b'@'):
                    head = False
            else:
                if line and not line.startswith(
                    (b' ', b'+', b'-', b'@', b'\\')
                ):
                    head = True
            diffline = False
            if not head and line and line.startswith((b'+', b'-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += b"\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (
                                line[len(stripline) :],
                                b'diff.trailingwhitespace',
                            )
                        break
                else:
                    yield (line, b'')
                if i + 1 < linecount:
                    yield (b'\n', b'')
    # flush any hunk still buffered at end of input
    for token in consumehunkbuffer():
        yield token
2867 2867
2868 2868
def diffui(*args, **kw):
    """Variant of diff() yielding (output, label) pairs for ui.write()."""
    return difflabel(diff, *args, **kw)
2872 2872
2873 2873
2874 2874 def _filepairs(modified, added, removed, copy, opts):
2875 2875 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2876 2876 before and f2 is the the name after. For added files, f1 will be None,
2877 2877 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2878 2878 or 'rename' (the latter two only if opts.git is set).'''
2879 2879 gone = set()
2880 2880
2881 2881 copyto = dict([(v, k) for k, v in copy.items()])
2882 2882
2883 2883 addedset, removedset = set(added), set(removed)
2884 2884
2885 2885 for f in sorted(modified + added + removed):
2886 2886 copyop = None
2887 2887 f1, f2 = f, f
2888 2888 if f in addedset:
2889 2889 f1 = None
2890 2890 if f in copy:
2891 2891 if opts.git:
2892 2892 f1 = copy[f]
2893 2893 if f1 in removedset and f1 not in gone:
2894 2894 copyop = b'rename'
2895 2895 gone.add(f1)
2896 2896 else:
2897 2897 copyop = b'copy'
2898 2898 elif f in removedset:
2899 2899 f2 = None
2900 2900 if opts.git:
2901 2901 # have we already reported a copy above?
2902 2902 if (
2903 2903 f in copyto
2904 2904 and copyto[f] in addedset
2905 2905 and copy[copyto[f]] == f
2906 2906 ):
2907 2907 continue
2908 2908 yield f1, f2, copyop
2909 2909
2910 2910
2911 2911 def trydiff(
2912 2912 repo,
2913 2913 revs,
2914 2914 ctx1,
2915 2915 ctx2,
2916 2916 modified,
2917 2917 added,
2918 2918 removed,
2919 2919 copy,
2920 2920 getfilectx,
2921 2921 opts,
2922 2922 losedatafn,
2923 2923 pathfn,
2924 2924 ):
2925 2925 '''given input data, generate a diff and yield it in blocks
2926 2926
2927 2927 If generating a diff would lose data like flags or binary data and
2928 2928 losedatafn is not None, it will be called.
2929 2929
2930 2930 pathfn is applied to every path in the diff output.
2931 2931 '''
2932 2932
2933 2933 def gitindex(text):
2934 2934 if not text:
2935 2935 text = b""
2936 2936 l = len(text)
2937 2937 s = hashlib.sha1(b'blob %d\0' % l)
2938 2938 s.update(text)
2939 2939 return hex(s.digest())
2940 2940
2941 2941 if opts.noprefix:
2942 2942 aprefix = bprefix = b''
2943 2943 else:
2944 2944 aprefix = b'a/'
2945 2945 bprefix = b'b/'
2946 2946
2947 2947 def diffline(f, revs):
2948 2948 revinfo = b' '.join([b"-r %s" % rev for rev in revs])
2949 2949 return b'diff %s %s' % (revinfo, f)
2950 2950
2951 2951 def isempty(fctx):
2952 2952 return fctx is None or fctx.size() == 0
2953 2953
2954 2954 date1 = dateutil.datestr(ctx1.date())
2955 2955 date2 = dateutil.datestr(ctx2.date())
2956 2956
2957 2957 gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
2958 2958
2959 2959 if not pathfn:
2960 2960 pathfn = lambda f: f
2961 2961
2962 2962 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2963 2963 content1 = None
2964 2964 content2 = None
2965 2965 fctx1 = None
2966 2966 fctx2 = None
2967 2967 flag1 = None
2968 2968 flag2 = None
2969 2969 if f1:
2970 2970 fctx1 = getfilectx(f1, ctx1)
2971 2971 if opts.git or losedatafn:
2972 2972 flag1 = ctx1.flags(f1)
2973 2973 if f2:
2974 2974 fctx2 = getfilectx(f2, ctx2)
2975 2975 if opts.git or losedatafn:
2976 2976 flag2 = ctx2.flags(f2)
2977 2977 # if binary is True, output "summary" or "base85", but not "text diff"
2978 2978 if opts.text:
2979 2979 binary = False
2980 2980 else:
2981 2981 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2982 2982
2983 2983 if losedatafn and not opts.git:
2984 2984 if (
2985 2985 binary
2986 2986 or
2987 2987 # copy/rename
2988 2988 f2 in copy
2989 2989 or
2990 2990 # empty file creation
2991 2991 (not f1 and isempty(fctx2))
2992 2992 or
2993 2993 # empty file deletion
2994 2994 (isempty(fctx1) and not f2)
2995 2995 or
2996 2996 # create with flags
2997 2997 (not f1 and flag2)
2998 2998 or
2999 2999 # change flags
3000 3000 (f1 and f2 and flag1 != flag2)
3001 3001 ):
3002 3002 losedatafn(f2 or f1)
3003 3003
3004 3004 path1 = pathfn(f1 or f2)
3005 3005 path2 = pathfn(f2 or f1)
3006 3006 header = []
3007 3007 if opts.git:
3008 3008 header.append(
3009 3009 b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
3010 3010 )
3011 3011 if not f1: # added
3012 3012 header.append(b'new file mode %s' % gitmode[flag2])
3013 3013 elif not f2: # removed
3014 3014 header.append(b'deleted file mode %s' % gitmode[flag1])
3015 3015 else: # modified/copied/renamed
3016 3016 mode1, mode2 = gitmode[flag1], gitmode[flag2]
3017 3017 if mode1 != mode2:
3018 3018 header.append(b'old mode %s' % mode1)
3019 3019 header.append(b'new mode %s' % mode2)
3020 3020 if copyop is not None:
3021 3021 if opts.showsimilarity:
3022 3022 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
3023 3023 header.append(b'similarity index %d%%' % sim)
3024 3024 header.append(b'%s from %s' % (copyop, path1))
3025 3025 header.append(b'%s to %s' % (copyop, path2))
3026 3026 elif revs:
3027 3027 header.append(diffline(path1, revs))
3028 3028
3029 3029 # fctx.is | diffopts | what to | is fctx.data()
3030 3030 # binary() | text nobinary git index | output? | outputted?
3031 3031 # ------------------------------------|----------------------------
3032 3032 # yes | no no no * | summary | no
3033 3033 # yes | no no yes * | base85 | yes
3034 3034 # yes | no yes no * | summary | no
3035 3035 # yes | no yes yes 0 | summary | no
3036 3036 # yes | no yes yes >0 | summary | semi [1]
3037 3037 # yes | yes * * * | text diff | yes
3038 3038 # no | * * * * | text diff | yes
3039 3039 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
3040 3040 if binary and (
3041 3041 not opts.git or (opts.git and opts.nobinary and not opts.index)
3042 3042 ):
3043 3043 # fast path: no binary content will be displayed, content1 and
3044 3044 # content2 are only used for equivalent test. cmp() could have a
3045 3045 # fast path.
3046 3046 if fctx1 is not None:
3047 3047 content1 = b'\0'
3048 3048 if fctx2 is not None:
3049 3049 if fctx1 is not None and not fctx1.cmp(fctx2):
3050 3050 content2 = b'\0' # not different
3051 3051 else:
3052 3052 content2 = b'\0\0'
3053 3053 else:
3054 3054 # normal path: load contents
3055 3055 if fctx1 is not None:
3056 3056 content1 = fctx1.data()
3057 3057 if fctx2 is not None:
3058 3058 content2 = fctx2.data()
3059 3059
3060 3060 if binary and opts.git and not opts.nobinary:
3061 3061 text = mdiff.b85diff(content1, content2)
3062 3062 if text:
3063 3063 header.append(
3064 3064 b'index %s..%s' % (gitindex(content1), gitindex(content2))
3065 3065 )
3066 3066 hunks = ((None, [text]),)
3067 3067 else:
3068 3068 if opts.git and opts.index > 0:
3069 3069 flag = flag1
3070 3070 if flag is None:
3071 3071 flag = flag2
3072 3072 header.append(
3073 3073 b'index %s..%s %s'
3074 3074 % (
3075 3075 gitindex(content1)[0 : opts.index],
3076 3076 gitindex(content2)[0 : opts.index],
3077 3077 gitmode[flag],
3078 3078 )
3079 3079 )
3080 3080
3081 3081 uheaders, hunks = mdiff.unidiff(
3082 3082 content1,
3083 3083 date1,
3084 3084 content2,
3085 3085 date2,
3086 3086 path1,
3087 3087 path2,
3088 3088 binary=binary,
3089 3089 opts=opts,
3090 3090 )
3091 3091 header.extend(uheaders)
3092 3092 yield fctx1, fctx2, header, hunks
3093 3093
3094 3094
def diffstatsum(stats):
    """Aggregate per-file diffstat tuples into whole-diff totals.

    ``stats`` is an iterable of ``(filename, adds, removes, isbinary)``
    tuples as produced by ``diffstatdata()``.  Returns a 5-tuple
    ``(maxfile, maxtotal, addtotal, removetotal, binary)`` where
    ``maxfile`` is the widest filename in display columns, ``maxtotal``
    the largest per-file change count, ``addtotal``/``removetotal`` the
    summed additions/removals, and ``binary`` whether any file was binary.
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        # width in terminal columns, not byte length (multi-byte aware)
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
3105 3105
3106 3106
def diffstatdata(lines):
    """Parse a unified diff (iterable of byte ``lines``) into statistics.

    Returns a list of ``(filename, adds, removes, isbinary)`` tuples,
    one per file touched by the diff.
    """
    diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    fname, added, removed, binary = None, 0, 0, False

    def flush():
        # record stats for the file we just finished, if any
        if fname:
            results.append((fname, added, removed, binary))

    # True while inside the per-file header; header lines beginning with
    # '--'/'++' must not be counted as removals/additions.
    inheader = False

    for line in lines:
        if line.startswith(b'diff'):
            flush()
            # a new file diff begins: reset counters and header state
            inheader = True
            added, removed, binary = 0, 0, False
            if line.startswith(b'diff --git a/'):
                fname = gitre.search(line).group(2)
            elif line.startswith(b'diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = diffre.search(line).group(1)
        elif line.startswith(b'@@'):
            inheader = False
        elif line.startswith(b'+') and not inheader:
            added += 1
        elif line.startswith(b'-') and not inheader:
            removed += 1
        elif line.startswith((b'GIT binary patch', b'Binary file')):
            binary = True
        elif line.startswith(b'rename from'):
            fname = line[12:]
        elif line.startswith(b'rename to'):
            fname += b' => %s' % line[10:]
    flush()
    return results
3150 3150
3151 3151
def diffstat(lines, width=80):
    """Render a classic diffstat summary of a unified diff.

    ``lines`` is an iterable of diff byte lines; ``width`` is the total
    terminal width to fit the histogram into.  Returns the whole report
    as a single bytes object, ending with a totals line when there were
    any changed files.
    """
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3  # leave room for the 'Bin' marker
    # columns left for the +/- histogram, but never fewer than 10
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = b'Bin' if isbinary else b'%d' % (adds + removes)
        output.append(
            b' %s%s | %*s %s%s\n'
            % (
                filename,
                b' ' * (maxname - encoding.colwidth(filename)),
                countwidth,
                count,
                b'+' * scale(adds),
                b'-' * scale(removes),
            )
        )

    if stats:
        output.append(
            _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
            % (len(stats), totaladds, totalremoves)
        )

    return b''.join(output)
3198 3198
3199 3199
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in b'+-':
            # totals line (or empty): emit unlabelled
            yield (line, b'')
        else:
            # split "name | count" part from the trailing +/- histogram
            name, graph = line.rsplit(b' ', 1)
            yield (name + b' ', b'')
            plus = re.search(br'\++', graph)
            if plus:
                yield (plus.group(0), b'diffstat.inserted')
            minus = re.search(br'-+', graph)
            if minus:
                yield (minus.group(0), b'diffstat.deleted')
    yield (b'\n', b'')
General Comments 0
You need to be logged in to leave comments. Login now