##// END OF EJS Templates
diff: use fctx.size() to test empty...
Jun Wu -
r32188:776127b2 default
parent child Browse files
Show More
@@ -1,2702 +1,2705 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import collections
12 12 import copy
13 13 import email
14 14 import errno
15 15 import hashlib
16 16 import os
17 17 import posixpath
18 18 import re
19 19 import shutil
20 20 import tempfile
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 base85,
30 30 copies,
31 31 diffhelpers,
32 32 encoding,
33 33 error,
34 34 mail,
35 35 mdiff,
36 36 pathutil,
37 37 pycompat,
38 38 scmutil,
39 39 similar,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
stringio = util.stringio

# Matches the "diff --git a/<old> b/<new>" header line of a git-style diff.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into alternating runs of tabs and non-tabs (used for
# tab-aware diff output processing).
tabsplitter = re.compile(br'(\t+|[^\t]+)')
47 47
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
51 51
52 52 # public functions
53 53
def split(stream):
    '''return an iterator of individual patches from a stream'''
    # Decide whether 'line' looks like an RFC-2822 style "Name: value"
    # header, or a continuation line when we are already inside a header.
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    # Re-join accumulated lines into a file-like object.
    def chunk(lines):
        return stringio(''.join(lines))

    # Split a stream of concatenated "# HG changeset patch" patches.
    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    # Split an mbox: every "From " line starts a new message; recurse into
    # split() for each message body (cur[1:] drops the "From " separator).
    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    # Let the email parser split a MIME message into its text parts.
    def mimesplit(stream, cur):
        # Flatten a message (or message part) back into a file object.
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    # Split on runs of header-looking lines when no MIME structure exists.
    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    # Fallback: the whole remaining input is one plain patch.
    def remainder(cur):
        yield chunk(cur)

    # Adapt objects that have readline() but not the iterator protocol.
    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Sniff the start of the input to pick a splitting strategy; the chosen
    # generator keeps consuming 'stream' from where this loop stopped, with
    # 'cur' holding the lines read so far.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
180 180
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# Each "# <header> <value>" line found in an hg patch header by extract()
# is stored in its result dictionary under the corresponding key.
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
187 187
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # The diff body is accumulated into a temp file whose name is returned
    # under the 'filename' key; the caller is responsible for unlinking it.
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip the "[PATCH n/m]" style prefix from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            # collapse header continuation lines into single spaces
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False          # saw '# HG changeset patch'
                hgpatchheader = False    # currently inside '# ...' header lines
                ignoretext = False       # drop text after a lone '---' line

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # Everything before the diff start is commit message and/or
                # hg patch header; sort it out line by line.
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # generic '# Header value' lines, see
                            # patchheadermap for the recognized headers
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: the temp file is useless
        os.unlink(tmpname)
    return data
307 307
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY.  'path' is patched file path.  'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise.  If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable.  Otherwise, 'mode' is None.
    """

    def __init__(self, path):
        self.path = path        # destination path of the patch
        self.oldpath = None     # source path for COPY/RENAME
        self.mode = None        # (islink, isexec) pair, or None
        self.op = 'MODIFY'      # one of ADD/DELETE/RENAME/MODIFY/COPY
        self.binary = False     # True for "GIT binary patch" payloads

    def setmode(self, mode):
        # Keep only the symlink and executable bits of a git file mode.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        other = patchmeta(self.path)
        for attr in ('oldpath', 'mode', 'op', 'binary'):
            setattr(other, attr, getattr(self, attr))
        return other

    def _ispatchinga(self, afile):
        # A '/dev/null' source side only matches a file being added.
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # A '/dev/null' destination side only matches a file being deleted.
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # True when both sides of a hunk header refer to this file.
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
353 353
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None          # patchmeta being accumulated for the current file
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # A new file header: flush the previous entry, if any.
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # Start of hunk data: metadata for this file is complete.
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # the last six characters hold the octal file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
397 397
class linereader(object):
    """Wrap a file object so already-read lines can be pushed back.

    Pushed-back lines are returned in FIFO order before any further
    data is read from the underlying file.
    """

    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # A None line is a no-op, so callers may push an "unread"
        # result unconditionally.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # Yield lines until readline() returns the empty string (EOF).
        return iter(self.readline, '')
class abstractbackend(object):
    """Interface for the destination of a patch application.

    Concrete subclasses write to a plain directory, the working copy of
    a repository, or an in-memory store.
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        # best-effort hook: backends without reject storage do nothing
        pass

    def exists(self, fname):
        raise NotImplementedError
class fsbackend(abstractbackend):
    """Backend reading and writing files under a base directory."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def _join(self, f):
        # Absolute path of f under the backend's base directory.
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # For a symlink, the "data" is its target string.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # A missing file is handled below; re-raise anything else.
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # Missing file: (None, None) per the getfile() contract.
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Mode-only change: leave the content untouched.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # Save rejected hunks next to the target as '<fname>.rej'.
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
501 501
class workingbackend(fsbackend):
    """Backend patching a repository working directory and recording the
    resulting copies, removals and changes in its dirstate."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity    # rename-detection threshold
        self.removed = set()
        self.changed = set()
        self.copied = []                # (copysource, dest) pairs

    def _checkknown(self, fname):
        # Refuse to patch an untracked file that already exists on disk.
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # Flush the accumulated copy/removal/change information into the
        # dirstate and return the sorted list of touched files.
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
545 545
class filestore(object):
    """Hold file contents in memory, spilling to a temporary directory
    once a size budget is exceeded.

    A negative maxsize keeps everything in memory; the default budget
    is 4 MiB.
    """

    def __init__(self, maxsize=None):
        self.opener = None   # lazily created vfs over the spill directory
        self.files = {}      # fname -> (spill file name, mode, copied)
        self.created = 0     # counter used to name spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0        # bytes currently held in memory
        self.data = {}       # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        inbudget = self.maxsize < 0 or len(data) + self.size <= self.maxsize
        if inbudget:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            # The spill directory is only created on first use.
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues by using simple sequential names on disk.
        fn = str(self.created)
        self.created += 1
        self.opener.write(fn, data)
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            fn, mode, copied = self.files[fname]
            return self.opener.read(fn), mode, copied
        return None, None, None

    def close(self):
        # Remove the spill directory, if one was ever created.
        if self.opener:
            shutil.rmtree(self.opener.base)
582 582
class repobackend(abstractbackend):
    """Backend applying a patch against a changectx, collecting the
    patched content into a filestore instead of the working copy."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx       # base context the patch is applied against
        self.store = store   # filestore receiving patched file contents
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # Missing file: (None, None) per the getfile() contract.
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Mode-only change: keep the base context's content.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # Report every file this patch touched in any way.
        return self.changed | self.removed
624 624
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff hunk ranges: "*** start,len ****" / "--- start,len ----"
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes for patching
eolmodes = ['strict', 'crlf', 'lf', 'auto']
629 629
class patchfile(object):
    """State for applying the hunks of one file of a patch.

    Current content is read through 'backend' (or 'store' for a copy
    source), hunks are applied in memory with optional offset/fuzz
    matching, and the result is written back by close().  Hunks that
    cannot be applied are collected in self.rej and saved as a .rej file.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode          # one of eolmodes
        self.eol = None                 # EOL style detected from the file
        self.backend = backend
        self.ui = ui
        self.lines = []                 # current file content, split in lines
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # Copies/renames read their base content from the store, other
        # operations from the backend.
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}          # line content -> list of line indices
        self.dirty = 0          # content changed; flush needed on close()
        self.offset = 0         # cumulative line drift from applied hunks
        self.skew = 0           # extra drift discovered by fuzzy matching
        self.rej = []           # hunks that could not be applied
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0          # number of hunks seen so far

    def writelines(self, fname, lines, mode):
        # Restore the requested EOL style before handing data to the
        # backend.
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # Print "patching file X" at most once: as a warning when a hunk
        # misbehaves, or as a note in verbose mode.
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk; return 0 on clean apply, the fuzz level when
        fuzzing was needed, or -1 when the hunk was rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace (or delete) the whole file at once.
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # Flush patched content if needed, save rejects, and report the
        # number of rejected hunks.
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
846 846
class header(object):
    """The header of one file in a patch, plus its parsed hunks."""

    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # git emits an "index " line for binary diffs
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum(max(h.added, h.removed) for h in self.hunks)))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        # git headers carry both sides; "diff -r" headers only one path.
        match = self.diffgit_re.match(self.header[0])
        if not match:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = match.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % ' '.join(map(repr, self.files()))

    def isnewfile(self):
        for line in self.header:
            if self.newfile_re.match(line):
                return True
        return False

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        if emptynewfile:
            return True
        return any(self.special_re.match(h) for h in self.header)
918 918
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            # Context trimming is deliberately disabled upstream (the
            # original condition was 'if False and delta > 0'), so the
            # start line and context lines are always kept unchanged.
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        return (isinstance(v, recordhunk) and
                v.hunk == self.hunk and
                v.proc == self.proc and
                self.fromline == v.fromline and
                self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = rem = 0
        for line in hunk:
            if line[0] == '+':
                add += 1
            elif line[0] == '-':
                rem += 1
        return add, rem

    def write(self, fp):
        # Derive the @@ range lengths from the surrounding context and the
        # counted additions/removals.
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
979 979
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Prompt the user about each header and hunk in 'headers' and return a
    pair (chunks, {}) where 'chunks' holds only the selected headers and
    hunks.  'operation' selects the wording of the prompts ('record',
    'revert' or 'discard') and defaults to 'record'.
    """
    if operation is None:
        operation = 'record'
    # prompt strings, resolved once for the requested operation
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
        'help': {
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
            'revert': _('[Ynesfdaq?]'
                        '$$ &Yes, revert this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Revert remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Revert &all changes to all remaining files'
                        '$$ &Quit, reverting no changes'
                        '$$ &? (display help)')
        }[operation]
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # a previous 'all'/'file' style answer short-circuits the prompt
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help']
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            # identical header already prompted for: ask only once
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # skipped hunk: later hunks' target lines shift accordingly
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single text hunk parsed from a unified or context diff.

    'a' holds the old-side lines, 'b' the new-side lines, and 'hunk' the
    raw hunk text starting with the description line.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            # context truthy -> context-diff format, else unified format
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from line reader 'lr'."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in '@@ -l[,s] +l[,s] @@' defaults to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk body from line reader 'lr'."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: '- '/'! ' lines become '-', '  ' stays context
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the bogus newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new side: '+ '/'! ' lines become '+', '  ' stays context
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # XXX message says 'old' but this is the new-side loop
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # splice this new-side line into self.hunk after any '-' runs
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing no-eol marker, or push the line back
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when all announced old/new lines have been read."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with fuzz context removed."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1387 1387
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against existing content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload has been decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list.

        For delta hunks 'lines' must hold the current content the delta
        is applied against.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode a 'literal N'/'delta N' base85+zlib payload from lr."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length: A-Z -> 1..26,
            # a-z -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1443 1443
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' patch line.

    The four-character prefix is dropped; the name ends at the first tab,
    or at the first space when there is no tab, or at end of line.
    """
    # note: parameter shadows the builtin 'str'; name kept for callers
    name = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1453 1453
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            # flip the sign of every changed line in place
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            # swap the counters to match the flipped lines
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks
1525 1525
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # remember where the next hunk starts on both sides
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # trailing context closes the pending hunk, if any
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen just before the hunk becomes its leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # valid (state, event) -> action table; anything else is an error
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1608 1608
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # a run of '/' counts as a single separator
        while pos < end - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1646 1646
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) patch.

    Pick the target file name and the operation (ADD/DELETE when the
    first hunk indicates creation/removal) from the '---'/'+++' names,
    the hunk's ranges, and which of the two files exist in 'backend'.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # '@@ -0,0 ...' against /dev/null means creation; converse means removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back to the patch's own names when neither file exists
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1701 1701
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not ours: push the line back for the caller
                lr.push(line)
                break
        return lines

    # dispatch order matters: 'diff' headers first, then single-character
    # prefixes, finally the @@ range line; anything else is 'other'
    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # '\' lines (no-eol markers) stay attached to the context
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1747 1747
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time, then rewind to where we started.
    """
    rewindto = 0
    try:
        # seekable input: remember the position and scan in place
        rewindto = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer it so we can rewind
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(rewindto)
    return gitpatches
1773 1773
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown, True = context diff, False = unified diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only git patches that precede this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # metadata-only git patches left over at end of stream
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1869 1869
def applybindelta(binchunk, data):
    """Apply a git binary delta hunk to 'data' and return the result.

    Implements the same wire format as git's patch-delta.c: two varint
    size headers followed by a stream of copy/insert opcodes.
    """
    def skipheader(chunk):
        # length of one little-endian base-128 varint at the chunk start
        n = 0
        for ch in chunk:
            n += 1
            if not (ord(ch) & 0x80):
                return n
        return n
    # drop the source-size and target-size headers; only opcodes remain
    binchunk = binchunk[skipheader(binchunk):]
    binchunk = binchunk[skipheader(binchunk):]
    pieces = []
    pos = 0
    while pos < len(binchunk):
        cmd = ord(binchunk[pos])
        pos += 1
        if cmd & 0x80:
            # copy opcode: low 4 bits select offset bytes, next 3 bits
            # select size bytes (little endian, omitted bytes are zero)
            offset = 0
            size = 0
            if cmd & 0x01:
                offset = ord(binchunk[pos])
                pos += 1
            if cmd & 0x02:
                offset |= ord(binchunk[pos]) << 8
                pos += 1
            if cmd & 0x04:
                offset |= ord(binchunk[pos]) << 16
                pos += 1
            if cmd & 0x08:
                offset |= ord(binchunk[pos]) << 24
                pos += 1
            if cmd & 0x10:
                size = ord(binchunk[pos])
                pos += 1
            if cmd & 0x20:
                size |= ord(binchunk[pos]) << 8
                pos += 1
            if cmd & 0x40:
                size |= ord(binchunk[pos]) << 16
                pos += 1
            if size == 0:
                # zero encodes the maximum copy size
                size = 0x10000
            pieces.append(data[offset:offset + size])
        elif cmd != 0:
            # literal insert of the next cmd bytes from the delta itself
            pieces.append(binchunk[pos:pos + cmd])
            pos += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
1925 1925
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and try to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the generic driver, using the regular file patcher
    kwargs = {'strip': strip, 'prefix': prefix, 'eolmode': eolmode}
    return _applydiff(ui, fp, patchfile, backend, store, **kwargs)
1938 1938
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Worker for applydiff(): drive iterhunks() over the stream in 'fp'
    # and replay each parsed event against 'backend'/'store'.  Returns 0
    # on a clean apply, 1 when fuzz was needed, -1 when hunks were
    # rejected.

    if prefix:
        # normalize the prefix relative to repo root/cwd, with exactly
        # one trailing slash
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip leading path components and prepend the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the preceding 'file' event failed; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                # applied, but with fuzz
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize the metadata from the paths
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change, no content hunks follow
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # report and keep going; remaining files may still apply
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash pristine copies of rename/copy sources before any
            # of them gets modified by later hunks
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2028 2028
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    'files' (a set) is updated in place with the paths the external
    tool reports as patched.  Raises PatchError when the tool exits
    non-zero.
    """
    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # Initialize before scanning the output: a 'with fuzz'/'FAILED' line
    # could appear before any 'patching file' line, which previously
    # raised UnboundLocalError on pf/printed_file.
    pf = patchname
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             util.explainexit(code)[0])
    return fuzz
2070 2070
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    # Apply 'patchobj' (a file path or an open file-like object) through
    # 'backend'.  'files', when given, collects the set of touched paths.
    # Returns True when the patch applied with fuzz, False on a clean
    # apply; raises PatchError when hunks were rejected.
    if files is None:
        files = set()
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume 'patchobj' is already file-like
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close streams we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2097 2097
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2104 2104
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> against changectx 'ctx', writing results to 'store'."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2109 2109
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a configured external tool takes precedence over the builtin patcher
    patcher = ui.config('ui', 'patch')
    if not patcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, patcher, patchname, strip, files,
                          similarity)
2131 2131
def changedfiles(ui, repo, patchpath, strip=1):
    # Return the set of file paths the patch at 'patchpath' would touch,
    # without applying it.  Rename sources are included.
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    # non-git patch: derive metadata from the paths
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2152 2152
class GitDiffRequired(Exception):
    """Raised when a change cannot be expressed without git-style diffs."""
    pass
2155 2155
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# historical alias kept for callers that still use the old name
diffopts = diffallopts
2162 2162
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # resolve one option: explicit command line value first, then
        # (unless HGPLAIN forces a default) the config file entry
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directory
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        # --binary on the command line overrides the nobinary config
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2242 2242
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    hunkiter = diffhunks(repo, node1=node1, node2=node2, match=match,
                         changes=changes, opts=opts, losedatafn=losedatafn,
                         prefix=prefix, relroot=relroot, copy=copy)
    for header, hunks in hunkiter:
        # flatten every hunk's lines into one text chunk per file
        text = ''.join(line for hrange, hlines in hunks for line in hlines)
        # a lone header line with no body is noise; suppress it
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2276 2276
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # default: working directory against its first parent
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs so repeated filectx lookups for
        # the same file do not reopen the revlog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # retry in git format when the callback demanded it
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2389 2389
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefixes that identify lines inside a file header block
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # prefixes that identify hunk content lines
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False  # currently inside a file header block?
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    # first hunk line ends the header block
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # tab runs get their own label inside the line
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: plain, unlabeled output
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2443 2443
def diffui(*args, **kw):
    """Labeled variant of diff(): yields (output, label) 2-tuples suitable
    for ui.write()."""
    return difflabel(diff, *args, **kw)
2447 2447
2448 2448 def _filepairs(modified, added, removed, copy, opts):
2449 2449 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2450 2450 before and f2 is the the name after. For added files, f1 will be None,
2451 2451 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2452 2452 or 'rename' (the latter two only if opts.git is set).'''
2453 2453 gone = set()
2454 2454
2455 2455 copyto = dict([(v, k) for k, v in copy.items()])
2456 2456
2457 2457 addedset, removedset = set(added), set(removed)
2458 2458
2459 2459 for f in sorted(modified + added + removed):
2460 2460 copyop = None
2461 2461 f1, f2 = f, f
2462 2462 if f in addedset:
2463 2463 f1 = None
2464 2464 if f in copy:
2465 2465 if opts.git:
2466 2466 f1 = copy[f]
2467 2467 if f1 in removedset and f1 not in gone:
2468 2468 copyop = 'rename'
2469 2469 gone.add(f1)
2470 2470 else:
2471 2471 copyop = 'copy'
2472 2472 elif f in removedset:
2473 2473 f2 = None
2474 2474 if opts.git:
2475 2475 # have we already reported a copy above?
2476 2476 if (f in copyto and copyto[f] in addedset
2477 2477 and copy[copyto[f]] == f):
2478 2478 continue
2479 2479 yield f1, f2, copyop
2480 2480
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git blob id: sha1 over "blob <len>\0" followed by the content
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) "diff -r REV [-r REV] file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        # missing context counts as empty; size() avoids reading the
        # full content just to test for emptiness
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # mercurial flag -> git file mode
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # developer sanity check: every incoming path must live under relroot
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            content1 = fctx1.data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            content2 = fctx2.data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # plain diffs cannot represent these changes; let the
            # caller decide whether to upgrade to git format
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary and not opts.text:
            # binary content: emit a base85 delta instead of text hunks
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks
2605 2608
def diffstatsum(stats):
    """Fold per-file (name, adds, removes, isbinary) tuples into totals.

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the
    widest file name (display columns), the largest per-file change
    count, the summed additions and removals, and whether any entry
    was binary.
    """
    widest = 0
    biggest = 0
    addtotal = 0
    removetotal = 0
    anybinary = False
    for filename, adds, removes, isbinary in stats:
        width = encoding.colwidth(filename)
        if width > widest:
            widest = width
        total = adds + removes
        if total > biggest:
            biggest = total
        addtotal += adds
        removetotal += removes
        anybinary = anybinary or isbinary

    return widest, biggest, addtotal, removetotal, anybinary
2616 2619
def diffstatdata(lines):
    """Parse diff text into a list of (filename, adds, removes, isbinary)."""
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename = None
    adds = removes = 0
    isbinary = False

    def flush():
        # record the stats gathered for the current file, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            flush()
            # reset the counters for the new file
            adds = removes = 0
            isbinary = False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    flush()
    return results
2646 2649
def diffstat(lines, width=80):
    """Render diff text as a classic diffstat table, one row per file,
    followed by a summary line; 'width' bounds the total row width."""
    rows = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # 'Bin' entries need at least three columns in the count field
        countwidth = max(countwidth, 3)
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        padding = ' ' * (maxname - encoding.colwidth(filename))
        rows.append(' %s%s | %*s %s%s\n' %
                    (filename, padding, countwidth, count,
                     '+' * scale(adds), '-' * scale(removes)))

    if stats:
        rows.append(_(' %d files changed, %d insertions(+), '
                      '%d deletions(-)\n')
                    % (len(stats), totaladds, totalremoves))

    return ''.join(rows)
2684 2687
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # split off the +/- graph and label its runs separately
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now