patch: use ctx.node() instead of bare node variable...
Sean Farley
r21833:c1ceec0c default
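
The change itself is a single line in diff() (file line 1625 in the listing below): the revision identifiers used for the "diff -r ..." header lines are now computed from the changectx objects (ctx1 = repo[node1], ctx2 = repo[node2]) rather than from the bare node1/node2 arguments. A minimal, self-contained sketch of the resulting behaviour follows; fakectx and the use of hexlify as hexfunc are hypothetical stand-ins for illustration only, not Mercurial APIs.

    from binascii import hexlify

    class fakectx(object):
        # hypothetical stand-in for a changectx: a committed context returns
        # its binary node, the working-directory context returns None
        def __init__(self, node):
            self._node = node
        def node(self):
            return self._node

    hexfunc = hexlify                 # stands in for hex/short from mercurial.node
    ctx1 = fakectx(b'\x12\x34' * 10)  # a committed changeset
    ctx2 = fakectx(None)              # the working directory

    # the line as committed: resolve the nodes through the contexts; the
    # working-directory context yields None from node() and is dropped by 'if node'
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
    assert revs == [b'1234' * 10]
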
@@ -1,1929 +1,1929 @@
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email, os, errno, re, posixpath
10 10 import tempfile, zlib, shutil
11 11 # On Python 2.4 you have to import these by name or they fail to
12 12 # load. This was not a problem on Python 2.7.
13 13 import email.Generator
14 14 import email.Parser
15 15
16 16 from i18n import _
17 17 from node import hex, short
18 18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 19
20 20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 21
22 22 class PatchError(Exception):
23 23 pass
24 24
25 25
26 26 # public functions
27 27
28 28 def split(stream):
29 29 '''return an iterator of individual patches from a stream'''
30 30 def isheader(line, inheader):
31 31 if inheader and line[0] in (' ', '\t'):
32 32 # continuation
33 33 return True
34 34 if line[0] in (' ', '-', '+'):
35 35 # diff line - don't check for header pattern in there
36 36 return False
37 37 l = line.split(': ', 1)
38 38 return len(l) == 2 and ' ' not in l[0]
39 39
40 40 def chunk(lines):
41 41 return cStringIO.StringIO(''.join(lines))
42 42
43 43 def hgsplit(stream, cur):
44 44 inheader = True
45 45
46 46 for line in stream:
47 47 if not line.strip():
48 48 inheader = False
49 49 if not inheader and line.startswith('# HG changeset patch'):
50 50 yield chunk(cur)
51 51 cur = []
52 52 inheader = True
53 53
54 54 cur.append(line)
55 55
56 56 if cur:
57 57 yield chunk(cur)
58 58
59 59 def mboxsplit(stream, cur):
60 60 for line in stream:
61 61 if line.startswith('From '):
62 62 for c in split(chunk(cur[1:])):
63 63 yield c
64 64 cur = []
65 65
66 66 cur.append(line)
67 67
68 68 if cur:
69 69 for c in split(chunk(cur[1:])):
70 70 yield c
71 71
72 72 def mimesplit(stream, cur):
73 73 def msgfp(m):
74 74 fp = cStringIO.StringIO()
75 75 g = email.Generator.Generator(fp, mangle_from_=False)
76 76 g.flatten(m)
77 77 fp.seek(0)
78 78 return fp
79 79
80 80 for line in stream:
81 81 cur.append(line)
82 82 c = chunk(cur)
83 83
84 84 m = email.Parser.Parser().parse(c)
85 85 if not m.is_multipart():
86 86 yield msgfp(m)
87 87 else:
88 88 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 89 for part in m.walk():
90 90 ct = part.get_content_type()
91 91 if ct not in ok_types:
92 92 continue
93 93 yield msgfp(part)
94 94
95 95 def headersplit(stream, cur):
96 96 inheader = False
97 97
98 98 for line in stream:
99 99 if not inheader and isheader(line, inheader):
100 100 yield chunk(cur)
101 101 cur = []
102 102 inheader = True
103 103 if inheader and not isheader(line, inheader):
104 104 inheader = False
105 105
106 106 cur.append(line)
107 107
108 108 if cur:
109 109 yield chunk(cur)
110 110
111 111 def remainder(cur):
112 112 yield chunk(cur)
113 113
114 114 class fiter(object):
115 115 def __init__(self, fp):
116 116 self.fp = fp
117 117
118 118 def __iter__(self):
119 119 return self
120 120
121 121 def next(self):
122 122 l = self.fp.readline()
123 123 if not l:
124 124 raise StopIteration
125 125 return l
126 126
127 127 inheader = False
128 128 cur = []
129 129
130 130 mimeheaders = ['content-type']
131 131
132 132 if not util.safehasattr(stream, 'next'):
133 133 # http responses, for example, have readline but not next
134 134 stream = fiter(stream)
135 135
136 136 for line in stream:
137 137 cur.append(line)
138 138 if line.startswith('# HG changeset patch'):
139 139 return hgsplit(stream, cur)
140 140 elif line.startswith('From '):
141 141 return mboxsplit(stream, cur)
142 142 elif isheader(line, inheader):
143 143 inheader = True
144 144 if line.split(':', 1)[0].lower() in mimeheaders:
145 145 # let email parser handle this
146 146 return mimesplit(stream, cur)
147 147 elif line.startswith('--- ') and inheader:
148 148 # No evil headers seen by diff start, split by hand
149 149 return headersplit(stream, cur)
150 150 # Not enough info, keep reading
151 151
152 152 # if we are here, we have a very plain patch
153 153 return remainder(cur)
154 154
155 155 def extract(ui, fileobj):
156 156 '''extract patch from data read from fileobj.
157 157
158 158 patch can be a normal patch or contained in an email message.
159 159
160 160 return tuple (filename, message, user, date, branch, node, p1, p2).
161 161 Any item in the returned tuple can be None. If filename is None,
162 162 fileobj did not contain a patch. Caller must unlink filename when done.'''
163 163
164 164 # attempt to detect the start of a patch
165 165 # (this heuristic is borrowed from quilt)
166 166 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 167 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 168 r'---[ \t].*?^\+\+\+[ \t]|'
169 169 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170 170
171 171 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 172 tmpfp = os.fdopen(fd, 'w')
173 173 try:
174 174 msg = email.Parser.Parser().parse(fileobj)
175 175
176 176 subject = msg['Subject']
177 177 user = msg['From']
178 178 if not subject and not user:
179 179 # Not an email, restore parsed headers if any
180 180 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181 181
182 182 # should try to parse msg['Date']
183 183 date = None
184 184 nodeid = None
185 185 branch = None
186 186 parents = []
187 187
188 188 if subject:
189 189 if subject.startswith('[PATCH'):
190 190 pend = subject.find(']')
191 191 if pend >= 0:
192 192 subject = subject[pend + 1:].lstrip()
193 193 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 194 ui.debug('Subject: %s\n' % subject)
195 195 if user:
196 196 ui.debug('From: %s\n' % user)
197 197 diffs_seen = 0
198 198 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 199 message = ''
200 200 for part in msg.walk():
201 201 content_type = part.get_content_type()
202 202 ui.debug('Content-Type: %s\n' % content_type)
203 203 if content_type not in ok_types:
204 204 continue
205 205 payload = part.get_payload(decode=True)
206 206 m = diffre.search(payload)
207 207 if m:
208 208 hgpatch = False
209 209 hgpatchheader = False
210 210 ignoretext = False
211 211
212 212 ui.debug('found patch at byte %d\n' % m.start(0))
213 213 diffs_seen += 1
214 214 cfp = cStringIO.StringIO()
215 215 for line in payload[:m.start(0)].splitlines():
216 216 if line.startswith('# HG changeset patch') and not hgpatch:
217 217 ui.debug('patch generated by hg export\n')
218 218 hgpatch = True
219 219 hgpatchheader = True
220 220 # drop earlier commit message content
221 221 cfp.seek(0)
222 222 cfp.truncate()
223 223 subject = None
224 224 elif hgpatchheader:
225 225 if line.startswith('# User '):
226 226 user = line[7:]
227 227 ui.debug('From: %s\n' % user)
228 228 elif line.startswith("# Date "):
229 229 date = line[7:]
230 230 elif line.startswith("# Branch "):
231 231 branch = line[9:]
232 232 elif line.startswith("# Node ID "):
233 233 nodeid = line[10:]
234 234 elif line.startswith("# Parent "):
235 235 parents.append(line[9:].lstrip())
236 236 elif not line.startswith("# "):
237 237 hgpatchheader = False
238 238 elif line == '---':
239 239 ignoretext = True
240 240 if not hgpatchheader and not ignoretext:
241 241 cfp.write(line)
242 242 cfp.write('\n')
243 243 message = cfp.getvalue()
244 244 if tmpfp:
245 245 tmpfp.write(payload)
246 246 if not payload.endswith('\n'):
247 247 tmpfp.write('\n')
248 248 elif not diffs_seen and message and content_type == 'text/plain':
249 249 message += '\n' + payload
250 250 except: # re-raises
251 251 tmpfp.close()
252 252 os.unlink(tmpname)
253 253 raise
254 254
255 255 if subject and not message.startswith(subject):
256 256 message = '%s\n%s' % (subject, message)
257 257 tmpfp.close()
258 258 if not diffs_seen:
259 259 os.unlink(tmpname)
260 260 return None, message, user, date, branch, None, None, None
261 261 p1 = parents and parents.pop(0) or None
262 262 p2 = parents and parents.pop(0) or None
263 263 return tmpname, message, user, date, branch, nodeid, p1, p2
264 264
265 265 class patchmeta(object):
266 266 """Patched file metadata
267 267
268 268 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
269 269 or COPY. 'path' is the patched file path. 'oldpath' is set to the
270 270 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 271 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 272 'islink' is True if the file is a symlink and 'isexec' is True if
273 273 the file is executable. Otherwise, 'mode' is None.
274 274 """
275 275 def __init__(self, path):
276 276 self.path = path
277 277 self.oldpath = None
278 278 self.mode = None
279 279 self.op = 'MODIFY'
280 280 self.binary = False
281 281
282 282 def setmode(self, mode):
283 283 islink = mode & 020000
284 284 isexec = mode & 0100
285 285 self.mode = (islink, isexec)
286 286
287 287 def copy(self):
288 288 other = patchmeta(self.path)
289 289 other.oldpath = self.oldpath
290 290 other.mode = self.mode
291 291 other.op = self.op
292 292 other.binary = self.binary
293 293 return other
294 294
295 295 def _ispatchinga(self, afile):
296 296 if afile == '/dev/null':
297 297 return self.op == 'ADD'
298 298 return afile == 'a/' + (self.oldpath or self.path)
299 299
300 300 def _ispatchingb(self, bfile):
301 301 if bfile == '/dev/null':
302 302 return self.op == 'DELETE'
303 303 return bfile == 'b/' + self.path
304 304
305 305 def ispatching(self, afile, bfile):
306 306 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307 307
308 308 def __repr__(self):
309 309 return "<patchmeta %s %r>" % (self.op, self.path)
310 310
311 311 def readgitpatch(lr):
312 312 """extract git-style metadata about patches from <patchname>"""
313 313
314 314 # Filter patch for git information
315 315 gp = None
316 316 gitpatches = []
317 317 for line in lr:
318 318 line = line.rstrip(' \r\n')
319 319 if line.startswith('diff --git a/'):
320 320 m = gitre.match(line)
321 321 if m:
322 322 if gp:
323 323 gitpatches.append(gp)
324 324 dst = m.group(2)
325 325 gp = patchmeta(dst)
326 326 elif gp:
327 327 if line.startswith('--- '):
328 328 gitpatches.append(gp)
329 329 gp = None
330 330 continue
331 331 if line.startswith('rename from '):
332 332 gp.op = 'RENAME'
333 333 gp.oldpath = line[12:]
334 334 elif line.startswith('rename to '):
335 335 gp.path = line[10:]
336 336 elif line.startswith('copy from '):
337 337 gp.op = 'COPY'
338 338 gp.oldpath = line[10:]
339 339 elif line.startswith('copy to '):
340 340 gp.path = line[8:]
341 341 elif line.startswith('deleted file'):
342 342 gp.op = 'DELETE'
343 343 elif line.startswith('new file mode '):
344 344 gp.op = 'ADD'
345 345 gp.setmode(int(line[-6:], 8))
346 346 elif line.startswith('new mode '):
347 347 gp.setmode(int(line[-6:], 8))
348 348 elif line.startswith('GIT binary patch'):
349 349 gp.binary = True
350 350 if gp:
351 351 gitpatches.append(gp)
352 352
353 353 return gitpatches
354 354
355 355 class linereader(object):
356 356 # simple class to allow pushing lines back into the input stream
357 357 def __init__(self, fp):
358 358 self.fp = fp
359 359 self.buf = []
360 360
361 361 def push(self, line):
362 362 if line is not None:
363 363 self.buf.append(line)
364 364
365 365 def readline(self):
366 366 if self.buf:
367 367 l = self.buf[0]
368 368 del self.buf[0]
369 369 return l
370 370 return self.fp.readline()
371 371
372 372 def __iter__(self):
373 373 while True:
374 374 l = self.readline()
375 375 if not l:
376 376 break
377 377 yield l
378 378
379 379 class abstractbackend(object):
380 380 def __init__(self, ui):
381 381 self.ui = ui
382 382
383 383 def getfile(self, fname):
384 384 """Return target file data and flags as a (data, (islink,
385 385 isexec)) tuple.
386 386 """
387 387 raise NotImplementedError
388 388
389 389 def setfile(self, fname, data, mode, copysource):
390 390 """Write data to target file fname and set its mode. mode is a
391 391 (islink, isexec) tuple. If data is None, the file content should
392 392 be left unchanged. If the file is modified after being copied,
393 393 copysource is set to the original file name.
394 394 """
395 395 raise NotImplementedError
396 396
397 397 def unlink(self, fname):
398 398 """Unlink target file."""
399 399 raise NotImplementedError
400 400
401 401 def writerej(self, fname, failed, total, lines):
402 402 """Write rejected lines for fname. failed is the number of hunks
403 403 which failed to apply and total is the total number of hunks for this
404 404 file.
405 405 """
406 406 pass
407 407
408 408 def exists(self, fname):
409 409 raise NotImplementedError
410 410
411 411 class fsbackend(abstractbackend):
412 412 def __init__(self, ui, basedir):
413 413 super(fsbackend, self).__init__(ui)
414 414 self.opener = scmutil.opener(basedir)
415 415
416 416 def _join(self, f):
417 417 return os.path.join(self.opener.base, f)
418 418
419 419 def getfile(self, fname):
420 420 if self.opener.islink(fname):
421 421 return (self.opener.readlink(fname), (True, False))
422 422
423 423 isexec = False
424 424 try:
425 425 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 426 except OSError, e:
427 427 if e.errno != errno.ENOENT:
428 428 raise
429 429 return (self.opener.read(fname), (False, isexec))
430 430
431 431 def setfile(self, fname, data, mode, copysource):
432 432 islink, isexec = mode
433 433 if data is None:
434 434 self.opener.setflags(fname, islink, isexec)
435 435 return
436 436 if islink:
437 437 self.opener.symlink(data, fname)
438 438 else:
439 439 self.opener.write(fname, data)
440 440 if isexec:
441 441 self.opener.setflags(fname, False, True)
442 442
443 443 def unlink(self, fname):
444 444 self.opener.unlinkpath(fname, ignoremissing=True)
445 445
446 446 def writerej(self, fname, failed, total, lines):
447 447 fname = fname + ".rej"
448 448 self.ui.warn(
449 449 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
450 450 (failed, total, fname))
451 451 fp = self.opener(fname, 'w')
452 452 fp.writelines(lines)
453 453 fp.close()
454 454
455 455 def exists(self, fname):
456 456 return self.opener.lexists(fname)
457 457
458 458 class workingbackend(fsbackend):
459 459 def __init__(self, ui, repo, similarity):
460 460 super(workingbackend, self).__init__(ui, repo.root)
461 461 self.repo = repo
462 462 self.similarity = similarity
463 463 self.removed = set()
464 464 self.changed = set()
465 465 self.copied = []
466 466
467 467 def _checkknown(self, fname):
468 468 if self.repo.dirstate[fname] == '?' and self.exists(fname):
469 469 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
470 470
471 471 def setfile(self, fname, data, mode, copysource):
472 472 self._checkknown(fname)
473 473 super(workingbackend, self).setfile(fname, data, mode, copysource)
474 474 if copysource is not None:
475 475 self.copied.append((copysource, fname))
476 476 self.changed.add(fname)
477 477
478 478 def unlink(self, fname):
479 479 self._checkknown(fname)
480 480 super(workingbackend, self).unlink(fname)
481 481 self.removed.add(fname)
482 482 self.changed.add(fname)
483 483
484 484 def close(self):
485 485 wctx = self.repo[None]
486 486 changed = set(self.changed)
487 487 for src, dst in self.copied:
488 488 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
489 489 if self.removed:
490 490 wctx.forget(sorted(self.removed))
491 491 for f in self.removed:
492 492 if f not in self.repo.dirstate:
493 493 # File was deleted and no longer belongs to the
494 494 # dirstate, it was probably marked added then
495 495 # deleted, and should not be considered by
496 496 # marktouched().
497 497 changed.discard(f)
498 498 if changed:
499 499 scmutil.marktouched(self.repo, changed, self.similarity)
500 500 return sorted(self.changed)
501 501
502 502 class filestore(object):
503 503 def __init__(self, maxsize=None):
504 504 self.opener = None
505 505 self.files = {}
506 506 self.created = 0
507 507 self.maxsize = maxsize
508 508 if self.maxsize is None:
509 509 self.maxsize = 4*(2**20)
510 510 self.size = 0
511 511 self.data = {}
512 512
513 513 def setfile(self, fname, data, mode, copied=None):
514 514 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
515 515 self.data[fname] = (data, mode, copied)
516 516 self.size += len(data)
517 517 else:
518 518 if self.opener is None:
519 519 root = tempfile.mkdtemp(prefix='hg-patch-')
520 520 self.opener = scmutil.opener(root)
521 521 # Avoid filename issues with these simple names
522 522 fn = str(self.created)
523 523 self.opener.write(fn, data)
524 524 self.created += 1
525 525 self.files[fname] = (fn, mode, copied)
526 526
527 527 def getfile(self, fname):
528 528 if fname in self.data:
529 529 return self.data[fname]
530 530 if not self.opener or fname not in self.files:
531 531 raise IOError
532 532 fn, mode, copied = self.files[fname]
533 533 return self.opener.read(fn), mode, copied
534 534
535 535 def close(self):
536 536 if self.opener:
537 537 shutil.rmtree(self.opener.base)
538 538
539 539 class repobackend(abstractbackend):
540 540 def __init__(self, ui, repo, ctx, store):
541 541 super(repobackend, self).__init__(ui)
542 542 self.repo = repo
543 543 self.ctx = ctx
544 544 self.store = store
545 545 self.changed = set()
546 546 self.removed = set()
547 547 self.copied = {}
548 548
549 549 def _checkknown(self, fname):
550 550 if fname not in self.ctx:
551 551 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
552 552
553 553 def getfile(self, fname):
554 554 try:
555 555 fctx = self.ctx[fname]
556 556 except error.LookupError:
557 557 raise IOError
558 558 flags = fctx.flags()
559 559 return fctx.data(), ('l' in flags, 'x' in flags)
560 560
561 561 def setfile(self, fname, data, mode, copysource):
562 562 if copysource:
563 563 self._checkknown(copysource)
564 564 if data is None:
565 565 data = self.ctx[fname].data()
566 566 self.store.setfile(fname, data, mode, copysource)
567 567 self.changed.add(fname)
568 568 if copysource:
569 569 self.copied[fname] = copysource
570 570
571 571 def unlink(self, fname):
572 572 self._checkknown(fname)
573 573 self.removed.add(fname)
574 574
575 575 def exists(self, fname):
576 576 return fname in self.ctx
577 577
578 578 def close(self):
579 579 return self.changed | self.removed
580 580
581 581 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
582 582 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
583 583 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
584 584 eolmodes = ['strict', 'crlf', 'lf', 'auto']
585 585
586 586 class patchfile(object):
587 587 def __init__(self, ui, gp, backend, store, eolmode='strict'):
588 588 self.fname = gp.path
589 589 self.eolmode = eolmode
590 590 self.eol = None
591 591 self.backend = backend
592 592 self.ui = ui
593 593 self.lines = []
594 594 self.exists = False
595 595 self.missing = True
596 596 self.mode = gp.mode
597 597 self.copysource = gp.oldpath
598 598 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
599 599 self.remove = gp.op == 'DELETE'
600 600 try:
601 601 if self.copysource is None:
602 602 data, mode = backend.getfile(self.fname)
603 603 self.exists = True
604 604 else:
605 605 data, mode = store.getfile(self.copysource)[:2]
606 606 self.exists = backend.exists(self.fname)
607 607 self.missing = False
608 608 if data:
609 609 self.lines = mdiff.splitnewlines(data)
610 610 if self.mode is None:
611 611 self.mode = mode
612 612 if self.lines:
613 613 # Normalize line endings
614 614 if self.lines[0].endswith('\r\n'):
615 615 self.eol = '\r\n'
616 616 elif self.lines[0].endswith('\n'):
617 617 self.eol = '\n'
618 618 if eolmode != 'strict':
619 619 nlines = []
620 620 for l in self.lines:
621 621 if l.endswith('\r\n'):
622 622 l = l[:-2] + '\n'
623 623 nlines.append(l)
624 624 self.lines = nlines
625 625 except IOError:
626 626 if self.create:
627 627 self.missing = False
628 628 if self.mode is None:
629 629 self.mode = (False, False)
630 630 if self.missing:
631 631 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
632 632
633 633 self.hash = {}
634 634 self.dirty = 0
635 635 self.offset = 0
636 636 self.skew = 0
637 637 self.rej = []
638 638 self.fileprinted = False
639 639 self.printfile(False)
640 640 self.hunks = 0
641 641
642 642 def writelines(self, fname, lines, mode):
643 643 if self.eolmode == 'auto':
644 644 eol = self.eol
645 645 elif self.eolmode == 'crlf':
646 646 eol = '\r\n'
647 647 else:
648 648 eol = '\n'
649 649
650 650 if self.eolmode != 'strict' and eol and eol != '\n':
651 651 rawlines = []
652 652 for l in lines:
653 653 if l and l[-1] == '\n':
654 654 l = l[:-1] + eol
655 655 rawlines.append(l)
656 656 lines = rawlines
657 657
658 658 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
659 659
660 660 def printfile(self, warn):
661 661 if self.fileprinted:
662 662 return
663 663 if warn or self.ui.verbose:
664 664 self.fileprinted = True
665 665 s = _("patching file %s\n") % self.fname
666 666 if warn:
667 667 self.ui.warn(s)
668 668 else:
669 669 self.ui.note(s)
670 670
671 671
672 672 def findlines(self, l, linenum):
673 673 # looks through the hash and finds candidate lines. The
674 674 # result is a list of line numbers sorted based on distance
675 675 # from linenum
676 676
677 677 cand = self.hash.get(l, [])
678 678 if len(cand) > 1:
679 679 # resort our list of potentials forward then back.
680 680 cand.sort(key=lambda x: abs(x - linenum))
681 681 return cand
682 682
683 683 def write_rej(self):
684 684 # our rejects are a little different from patch(1). This always
685 685 # creates rejects in the same form as the original patch. A file
686 686 # header is inserted so that you can run the reject through patch again
687 687 # without having to type the filename.
688 688 if not self.rej:
689 689 return
690 690 base = os.path.basename(self.fname)
691 691 lines = ["--- %s\n+++ %s\n" % (base, base)]
692 692 for x in self.rej:
693 693 for l in x.hunk:
694 694 lines.append(l)
695 695 if l[-1] != '\n':
696 696 lines.append("\n\ No newline at end of file\n")
697 697 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
698 698
699 699 def apply(self, h):
700 700 if not h.complete():
701 701 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
702 702 (h.number, h.desc, len(h.a), h.lena, len(h.b),
703 703 h.lenb))
704 704
705 705 self.hunks += 1
706 706
707 707 if self.missing:
708 708 self.rej.append(h)
709 709 return -1
710 710
711 711 if self.exists and self.create:
712 712 if self.copysource:
713 713 self.ui.warn(_("cannot create %s: destination already "
714 714 "exists\n") % self.fname)
715 715 else:
716 716 self.ui.warn(_("file %s already exists\n") % self.fname)
717 717 self.rej.append(h)
718 718 return -1
719 719
720 720 if isinstance(h, binhunk):
721 721 if self.remove:
722 722 self.backend.unlink(self.fname)
723 723 else:
724 724 l = h.new(self.lines)
725 725 self.lines[:] = l
726 726 self.offset += len(l)
727 727 self.dirty = True
728 728 return 0
729 729
730 730 horig = h
731 731 if (self.eolmode in ('crlf', 'lf')
732 732 or self.eolmode == 'auto' and self.eol):
733 733 # If new eols are going to be normalized, then normalize
734 734 # hunk data before patching. Otherwise, preserve input
735 735 # line-endings.
736 736 h = h.getnormalized()
737 737
738 738 # fast case first, no offsets, no fuzz
739 739 old, oldstart, new, newstart = h.fuzzit(0, False)
740 740 oldstart += self.offset
741 741 orig_start = oldstart
742 742 # if there's skew we want to emit the "(offset %d lines)" even
743 743 # when the hunk cleanly applies at start + skew, so skip the
744 744 # fast case code
745 745 if (self.skew == 0 and
746 746 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
747 747 if self.remove:
748 748 self.backend.unlink(self.fname)
749 749 else:
750 750 self.lines[oldstart:oldstart + len(old)] = new
751 751 self.offset += len(new) - len(old)
752 752 self.dirty = True
753 753 return 0
754 754
755 755 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
756 756 self.hash = {}
757 757 for x, s in enumerate(self.lines):
758 758 self.hash.setdefault(s, []).append(x)
759 759
760 760 for fuzzlen in xrange(3):
761 761 for toponly in [True, False]:
762 762 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
763 763 oldstart = oldstart + self.offset + self.skew
764 764 oldstart = min(oldstart, len(self.lines))
765 765 if old:
766 766 cand = self.findlines(old[0][1:], oldstart)
767 767 else:
768 768 # Only adding lines with no or fuzzed context, just
769 769 # take the skew into account
770 770 cand = [oldstart]
771 771
772 772 for l in cand:
773 773 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
774 774 self.lines[l : l + len(old)] = new
775 775 self.offset += len(new) - len(old)
776 776 self.skew = l - orig_start
777 777 self.dirty = True
778 778 offset = l - orig_start - fuzzlen
779 779 if fuzzlen:
780 780 msg = _("Hunk #%d succeeded at %d "
781 781 "with fuzz %d "
782 782 "(offset %d lines).\n")
783 783 self.printfile(True)
784 784 self.ui.warn(msg %
785 785 (h.number, l + 1, fuzzlen, offset))
786 786 else:
787 787 msg = _("Hunk #%d succeeded at %d "
788 788 "(offset %d lines).\n")
789 789 self.ui.note(msg % (h.number, l + 1, offset))
790 790 return fuzzlen
791 791 self.printfile(True)
792 792 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
793 793 self.rej.append(horig)
794 794 return -1
795 795
796 796 def close(self):
797 797 if self.dirty:
798 798 self.writelines(self.fname, self.lines, self.mode)
799 799 self.write_rej()
800 800 return len(self.rej)
801 801
802 802 class hunk(object):
803 803 def __init__(self, desc, num, lr, context):
804 804 self.number = num
805 805 self.desc = desc
806 806 self.hunk = [desc]
807 807 self.a = []
808 808 self.b = []
809 809 self.starta = self.lena = None
810 810 self.startb = self.lenb = None
811 811 if lr is not None:
812 812 if context:
813 813 self.read_context_hunk(lr)
814 814 else:
815 815 self.read_unified_hunk(lr)
816 816
817 817 def getnormalized(self):
818 818 """Return a copy with line endings normalized to LF."""
819 819
820 820 def normalize(lines):
821 821 nlines = []
822 822 for line in lines:
823 823 if line.endswith('\r\n'):
824 824 line = line[:-2] + '\n'
825 825 nlines.append(line)
826 826 return nlines
827 827
828 828 # Dummy object, it is rebuilt manually
829 829 nh = hunk(self.desc, self.number, None, None)
830 830 nh.number = self.number
831 831 nh.desc = self.desc
832 832 nh.hunk = self.hunk
833 833 nh.a = normalize(self.a)
834 834 nh.b = normalize(self.b)
835 835 nh.starta = self.starta
836 836 nh.startb = self.startb
837 837 nh.lena = self.lena
838 838 nh.lenb = self.lenb
839 839 return nh
840 840
841 841 def read_unified_hunk(self, lr):
842 842 m = unidesc.match(self.desc)
843 843 if not m:
844 844 raise PatchError(_("bad hunk #%d") % self.number)
845 845 self.starta, self.lena, self.startb, self.lenb = m.groups()
846 846 if self.lena is None:
847 847 self.lena = 1
848 848 else:
849 849 self.lena = int(self.lena)
850 850 if self.lenb is None:
851 851 self.lenb = 1
852 852 else:
853 853 self.lenb = int(self.lenb)
854 854 self.starta = int(self.starta)
855 855 self.startb = int(self.startb)
856 856 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
857 857 self.b)
858 858 # if we hit eof before finishing out the hunk, the last line will
859 859 # be zero length. Let's try to fix it up.
860 860 while len(self.hunk[-1]) == 0:
861 861 del self.hunk[-1]
862 862 del self.a[-1]
863 863 del self.b[-1]
864 864 self.lena -= 1
865 865 self.lenb -= 1
866 866 self._fixnewline(lr)
867 867
868 868 def read_context_hunk(self, lr):
869 869 self.desc = lr.readline()
870 870 m = contextdesc.match(self.desc)
871 871 if not m:
872 872 raise PatchError(_("bad hunk #%d") % self.number)
873 873 self.starta, aend = m.groups()
874 874 self.starta = int(self.starta)
875 875 if aend is None:
876 876 aend = self.starta
877 877 self.lena = int(aend) - self.starta
878 878 if self.starta:
879 879 self.lena += 1
880 880 for x in xrange(self.lena):
881 881 l = lr.readline()
882 882 if l.startswith('---'):
883 883 # lines addition, old block is empty
884 884 lr.push(l)
885 885 break
886 886 s = l[2:]
887 887 if l.startswith('- ') or l.startswith('! '):
888 888 u = '-' + s
889 889 elif l.startswith(' '):
890 890 u = ' ' + s
891 891 else:
892 892 raise PatchError(_("bad hunk #%d old text line %d") %
893 893 (self.number, x))
894 894 self.a.append(u)
895 895 self.hunk.append(u)
896 896
897 897 l = lr.readline()
898 898 if l.startswith('\ '):
899 899 s = self.a[-1][:-1]
900 900 self.a[-1] = s
901 901 self.hunk[-1] = s
902 902 l = lr.readline()
903 903 m = contextdesc.match(l)
904 904 if not m:
905 905 raise PatchError(_("bad hunk #%d") % self.number)
906 906 self.startb, bend = m.groups()
907 907 self.startb = int(self.startb)
908 908 if bend is None:
909 909 bend = self.startb
910 910 self.lenb = int(bend) - self.startb
911 911 if self.startb:
912 912 self.lenb += 1
913 913 hunki = 1
914 914 for x in xrange(self.lenb):
915 915 l = lr.readline()
916 916 if l.startswith('\ '):
917 917 # XXX: the only way to hit this is with an invalid line range.
918 918 # The no-eol marker is not counted in the line range, but I
919 919 # guess there are diff(1) implementations out there which behave differently.
920 920 s = self.b[-1][:-1]
921 921 self.b[-1] = s
922 922 self.hunk[hunki - 1] = s
923 923 continue
924 924 if not l:
925 925 # line deletions, new block is empty and we hit EOF
926 926 lr.push(l)
927 927 break
928 928 s = l[2:]
929 929 if l.startswith('+ ') or l.startswith('! '):
930 930 u = '+' + s
931 931 elif l.startswith(' '):
932 932 u = ' ' + s
933 933 elif len(self.b) == 0:
934 934 # line deletions, new block is empty
935 935 lr.push(l)
936 936 break
937 937 else:
938 938 raise PatchError(_("bad hunk #%d old text line %d") %
939 939 (self.number, x))
940 940 self.b.append(s)
941 941 while True:
942 942 if hunki >= len(self.hunk):
943 943 h = ""
944 944 else:
945 945 h = self.hunk[hunki]
946 946 hunki += 1
947 947 if h == u:
948 948 break
949 949 elif h.startswith('-'):
950 950 continue
951 951 else:
952 952 self.hunk.insert(hunki - 1, u)
953 953 break
954 954
955 955 if not self.a:
956 956 # this happens when lines were only added to the hunk
957 957 for x in self.hunk:
958 958 if x.startswith('-') or x.startswith(' '):
959 959 self.a.append(x)
960 960 if not self.b:
961 961 # this happens when lines were only deleted from the hunk
962 962 for x in self.hunk:
963 963 if x.startswith('+') or x.startswith(' '):
964 964 self.b.append(x[1:])
965 965 # @@ -start,len +start,len @@
966 966 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
967 967 self.startb, self.lenb)
968 968 self.hunk[0] = self.desc
969 969 self._fixnewline(lr)
970 970
971 971 def _fixnewline(self, lr):
972 972 l = lr.readline()
973 973 if l.startswith('\ '):
974 974 diffhelpers.fix_newline(self.hunk, self.a, self.b)
975 975 else:
976 976 lr.push(l)
977 977
978 978 def complete(self):
979 979 return len(self.a) == self.lena and len(self.b) == self.lenb
980 980
981 981 def _fuzzit(self, old, new, fuzz, toponly):
982 982 # this removes context lines from the top and bottom of the old and new
983 983 # lists. It checks the hunk to make sure only context lines are removed,
984 984 # and then returns the shortened lists of lines.
985 985 fuzz = min(fuzz, len(old))
986 986 if fuzz:
987 987 top = 0
988 988 bot = 0
989 989 hlen = len(self.hunk)
990 990 for x in xrange(hlen - 1):
991 991 # the hunk starts with the @@ line, so use x+1
992 992 if self.hunk[x + 1][0] == ' ':
993 993 top += 1
994 994 else:
995 995 break
996 996 if not toponly:
997 997 for x in xrange(hlen - 1):
998 998 if self.hunk[hlen - bot - 1][0] == ' ':
999 999 bot += 1
1000 1000 else:
1001 1001 break
1002 1002
1003 1003 bot = min(fuzz, bot)
1004 1004 top = min(fuzz, top)
1005 1005 return old[top:len(old) - bot], new[top:len(new) - bot], top
1006 1006 return old, new, 0
1007 1007
1008 1008 def fuzzit(self, fuzz, toponly):
1009 1009 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1010 1010 oldstart = self.starta + top
1011 1011 newstart = self.startb + top
1012 1012 # zero length hunk ranges already have their start decremented
1013 1013 if self.lena and oldstart > 0:
1014 1014 oldstart -= 1
1015 1015 if self.lenb and newstart > 0:
1016 1016 newstart -= 1
1017 1017 return old, oldstart, new, newstart
1018 1018
1019 1019 class binhunk(object):
1020 1020 'A binary patch file.'
1021 1021 def __init__(self, lr, fname):
1022 1022 self.text = None
1023 1023 self.delta = False
1024 1024 self.hunk = ['GIT binary patch\n']
1025 1025 self._fname = fname
1026 1026 self._read(lr)
1027 1027
1028 1028 def complete(self):
1029 1029 return self.text is not None
1030 1030
1031 1031 def new(self, lines):
1032 1032 if self.delta:
1033 1033 return [applybindelta(self.text, ''.join(lines))]
1034 1034 return [self.text]
1035 1035
1036 1036 def _read(self, lr):
1037 1037 def getline(lr, hunk):
1038 1038 l = lr.readline()
1039 1039 hunk.append(l)
1040 1040 return l.rstrip('\r\n')
1041 1041
1042 1042 size = 0
1043 1043 while True:
1044 1044 line = getline(lr, self.hunk)
1045 1045 if not line:
1046 1046 raise PatchError(_('could not extract "%s" binary data')
1047 1047 % self._fname)
1048 1048 if line.startswith('literal '):
1049 1049 size = int(line[8:].rstrip())
1050 1050 break
1051 1051 if line.startswith('delta '):
1052 1052 size = int(line[6:].rstrip())
1053 1053 self.delta = True
1054 1054 break
1055 1055 dec = []
1056 1056 line = getline(lr, self.hunk)
1057 1057 while len(line) > 1:
1058 1058 l = line[0]
1059 1059 if l <= 'Z' and l >= 'A':
1060 1060 l = ord(l) - ord('A') + 1
1061 1061 else:
1062 1062 l = ord(l) - ord('a') + 27
1063 1063 try:
1064 1064 dec.append(base85.b85decode(line[1:])[:l])
1065 1065 except ValueError, e:
1066 1066 raise PatchError(_('could not decode "%s" binary patch: %s')
1067 1067 % (self._fname, str(e)))
1068 1068 line = getline(lr, self.hunk)
1069 1069 text = zlib.decompress(''.join(dec))
1070 1070 if len(text) != size:
1071 1071 raise PatchError(_('"%s" length is %d bytes, should be %d')
1072 1072 % (self._fname, len(text), size))
1073 1073 self.text = text
1074 1074
1075 1075 def parsefilename(str):
1076 1076 # --- filename \t|space stuff
1077 1077 s = str[4:].rstrip('\r\n')
1078 1078 i = s.find('\t')
1079 1079 if i < 0:
1080 1080 i = s.find(' ')
1081 1081 if i < 0:
1082 1082 return s
1083 1083 return s[:i]
1084 1084
1085 1085 def pathstrip(path, strip):
1086 1086 pathlen = len(path)
1087 1087 i = 0
1088 1088 if strip == 0:
1089 1089 return '', path.rstrip()
1090 1090 count = strip
1091 1091 while count > 0:
1092 1092 i = path.find('/', i)
1093 1093 if i == -1:
1094 1094 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1095 1095 (count, strip, path))
1096 1096 i += 1
1097 1097 # consume '//' in the path
1098 1098 while i < pathlen - 1 and path[i] == '/':
1099 1099 i += 1
1100 1100 count -= 1
1101 1101 return path[:i].lstrip(), path[i:].rstrip()
1102 1102
1103 1103 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1104 1104 nulla = afile_orig == "/dev/null"
1105 1105 nullb = bfile_orig == "/dev/null"
1106 1106 create = nulla and hunk.starta == 0 and hunk.lena == 0
1107 1107 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1108 1108 abase, afile = pathstrip(afile_orig, strip)
1109 1109 gooda = not nulla and backend.exists(afile)
1110 1110 bbase, bfile = pathstrip(bfile_orig, strip)
1111 1111 if afile == bfile:
1112 1112 goodb = gooda
1113 1113 else:
1114 1114 goodb = not nullb and backend.exists(bfile)
1115 1115 missing = not goodb and not gooda and not create
1116 1116
1117 1117 # some diff programs apparently produce patches where the afile is
1118 1118 # not /dev/null, but afile starts with bfile
1119 1119 abasedir = afile[:afile.rfind('/') + 1]
1120 1120 bbasedir = bfile[:bfile.rfind('/') + 1]
1121 1121 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1122 1122 and hunk.starta == 0 and hunk.lena == 0):
1123 1123 create = True
1124 1124 missing = False
1125 1125
1126 1126 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1127 1127 # diff is between a file and its backup. In this case, the original
1128 1128 # file should be patched (see original mpatch code).
1129 1129 isbackup = (abase == bbase and bfile.startswith(afile))
1130 1130 fname = None
1131 1131 if not missing:
1132 1132 if gooda and goodb:
1133 1133 fname = isbackup and afile or bfile
1134 1134 elif gooda:
1135 1135 fname = afile
1136 1136
1137 1137 if not fname:
1138 1138 if not nullb:
1139 1139 fname = isbackup and afile or bfile
1140 1140 elif not nulla:
1141 1141 fname = afile
1142 1142 else:
1143 1143 raise PatchError(_("undefined source and destination files"))
1144 1144
1145 1145 gp = patchmeta(fname)
1146 1146 if create:
1147 1147 gp.op = 'ADD'
1148 1148 elif remove:
1149 1149 gp.op = 'DELETE'
1150 1150 return gp
1151 1151
1152 1152 def scangitpatch(lr, firstline):
1153 1153 """
1154 1154 Git patches can emit:
1155 1155 - rename a to b
1156 1156 - change b
1157 1157 - copy a to c
1158 1158 - change c
1159 1159
1160 1160 We cannot apply this sequence as-is: the renamed 'a' could not be
1161 1161 found, because it would have been renamed already. And we cannot copy
1162 1162 from 'b' instead because 'b' would have been changed already. So
1163 1163 we scan the git patch for copy and rename commands so we can
1164 1164 perform the copies ahead of time.
1165 1165 """
1166 1166 pos = 0
1167 1167 try:
1168 1168 pos = lr.fp.tell()
1169 1169 fp = lr.fp
1170 1170 except IOError:
1171 1171 fp = cStringIO.StringIO(lr.fp.read())
1172 1172 gitlr = linereader(fp)
1173 1173 gitlr.push(firstline)
1174 1174 gitpatches = readgitpatch(gitlr)
1175 1175 fp.seek(pos)
1176 1176 return gitpatches
1177 1177
1178 1178 def iterhunks(fp):
1179 1179 """Read a patch and yield the following events:
1180 1180 - ("file", afile, bfile, firsthunk): select a new target file.
1181 1181 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1182 1182 "file" event.
1183 1183 - ("git", gitchanges): current diff is in git format, gitchanges
1184 1184 maps filenames to gitpatch records. Unique event.
1185 1185 """
1186 1186 afile = ""
1187 1187 bfile = ""
1188 1188 state = None
1189 1189 hunknum = 0
1190 1190 emitfile = newfile = False
1191 1191 gitpatches = None
1192 1192
1193 1193 # our states
1194 1194 BFILE = 1
1195 1195 context = None
1196 1196 lr = linereader(fp)
1197 1197
1198 1198 while True:
1199 1199 x = lr.readline()
1200 1200 if not x:
1201 1201 break
1202 1202 if state == BFILE and (
1203 1203 (not context and x[0] == '@')
1204 1204 or (context is not False and x.startswith('***************'))
1205 1205 or x.startswith('GIT binary patch')):
1206 1206 gp = None
1207 1207 if (gitpatches and
1208 1208 gitpatches[-1].ispatching(afile, bfile)):
1209 1209 gp = gitpatches.pop()
1210 1210 if x.startswith('GIT binary patch'):
1211 1211 h = binhunk(lr, gp.path)
1212 1212 else:
1213 1213 if context is None and x.startswith('***************'):
1214 1214 context = True
1215 1215 h = hunk(x, hunknum + 1, lr, context)
1216 1216 hunknum += 1
1217 1217 if emitfile:
1218 1218 emitfile = False
1219 1219 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1220 1220 yield 'hunk', h
1221 1221 elif x.startswith('diff --git a/'):
1222 1222 m = gitre.match(x.rstrip(' \r\n'))
1223 1223 if not m:
1224 1224 continue
1225 1225 if gitpatches is None:
1226 1226 # scan whole input for git metadata
1227 1227 gitpatches = scangitpatch(lr, x)
1228 1228 yield 'git', [g.copy() for g in gitpatches
1229 1229 if g.op in ('COPY', 'RENAME')]
1230 1230 gitpatches.reverse()
1231 1231 afile = 'a/' + m.group(1)
1232 1232 bfile = 'b/' + m.group(2)
1233 1233 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1234 1234 gp = gitpatches.pop()
1235 1235 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1236 1236 if not gitpatches:
1237 1237 raise PatchError(_('failed to synchronize metadata for "%s"')
1238 1238 % afile[2:])
1239 1239 gp = gitpatches[-1]
1240 1240 newfile = True
1241 1241 elif x.startswith('---'):
1242 1242 # check for a unified diff
1243 1243 l2 = lr.readline()
1244 1244 if not l2.startswith('+++'):
1245 1245 lr.push(l2)
1246 1246 continue
1247 1247 newfile = True
1248 1248 context = False
1249 1249 afile = parsefilename(x)
1250 1250 bfile = parsefilename(l2)
1251 1251 elif x.startswith('***'):
1252 1252 # check for a context diff
1253 1253 l2 = lr.readline()
1254 1254 if not l2.startswith('---'):
1255 1255 lr.push(l2)
1256 1256 continue
1257 1257 l3 = lr.readline()
1258 1258 lr.push(l3)
1259 1259 if not l3.startswith("***************"):
1260 1260 lr.push(l2)
1261 1261 continue
1262 1262 newfile = True
1263 1263 context = True
1264 1264 afile = parsefilename(x)
1265 1265 bfile = parsefilename(l2)
1266 1266
1267 1267 if newfile:
1268 1268 newfile = False
1269 1269 emitfile = True
1270 1270 state = BFILE
1271 1271 hunknum = 0
1272 1272
1273 1273 while gitpatches:
1274 1274 gp = gitpatches.pop()
1275 1275 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1276 1276
1277 1277 def applybindelta(binchunk, data):
1278 1278 """Apply a binary delta hunk
1279 1279 The algorithm used is the algorithm from git's patch-delta.c
1280 1280 """
1281 1281 def deltahead(binchunk):
1282 1282 i = 0
1283 1283 for c in binchunk:
1284 1284 i += 1
1285 1285 if not (ord(c) & 0x80):
1286 1286 return i
1287 1287 return i
1288 1288 out = ""
1289 1289 s = deltahead(binchunk)
1290 1290 binchunk = binchunk[s:]
1291 1291 s = deltahead(binchunk)
1292 1292 binchunk = binchunk[s:]
1293 1293 i = 0
1294 1294 while i < len(binchunk):
1295 1295 cmd = ord(binchunk[i])
1296 1296 i += 1
1297 1297 if (cmd & 0x80):
1298 1298 offset = 0
1299 1299 size = 0
1300 1300 if (cmd & 0x01):
1301 1301 offset = ord(binchunk[i])
1302 1302 i += 1
1303 1303 if (cmd & 0x02):
1304 1304 offset |= ord(binchunk[i]) << 8
1305 1305 i += 1
1306 1306 if (cmd & 0x04):
1307 1307 offset |= ord(binchunk[i]) << 16
1308 1308 i += 1
1309 1309 if (cmd & 0x08):
1310 1310 offset |= ord(binchunk[i]) << 24
1311 1311 i += 1
1312 1312 if (cmd & 0x10):
1313 1313 size = ord(binchunk[i])
1314 1314 i += 1
1315 1315 if (cmd & 0x20):
1316 1316 size |= ord(binchunk[i]) << 8
1317 1317 i += 1
1318 1318 if (cmd & 0x40):
1319 1319 size |= ord(binchunk[i]) << 16
1320 1320 i += 1
1321 1321 if size == 0:
1322 1322 size = 0x10000
1323 1323 offset_end = offset + size
1324 1324 out += data[offset:offset_end]
1325 1325 elif cmd != 0:
1326 1326 offset_end = i + cmd
1327 1327 out += binchunk[i:offset_end]
1328 1328 i += cmd
1329 1329 else:
1330 1330 raise PatchError(_('unexpected delta opcode 0'))
1331 1331 return out
1332 1332
1333 1333 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1334 1334 """Reads a patch from fp and tries to apply it.
1335 1335
1336 1336 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1337 1337 there was any fuzz.
1338 1338
1339 1339 If 'eolmode' is 'strict', the patch content and patched file are
1340 1340 read in binary mode. Otherwise, line endings are ignored when
1341 1341 patching then normalized according to 'eolmode'.
1342 1342 """
1343 1343 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1344 1344 eolmode=eolmode)
1345 1345
1346 1346 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1347 1347 eolmode='strict'):
1348 1348
1349 1349 def pstrip(p):
1350 1350 return pathstrip(p, strip - 1)[1]
1351 1351
1352 1352 rejects = 0
1353 1353 err = 0
1354 1354 current_file = None
1355 1355
1356 1356 for state, values in iterhunks(fp):
1357 1357 if state == 'hunk':
1358 1358 if not current_file:
1359 1359 continue
1360 1360 ret = current_file.apply(values)
1361 1361 if ret > 0:
1362 1362 err = 1
1363 1363 elif state == 'file':
1364 1364 if current_file:
1365 1365 rejects += current_file.close()
1366 1366 current_file = None
1367 1367 afile, bfile, first_hunk, gp = values
1368 1368 if gp:
1369 1369 gp.path = pstrip(gp.path)
1370 1370 if gp.oldpath:
1371 1371 gp.oldpath = pstrip(gp.oldpath)
1372 1372 else:
1373 1373 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1374 1374 if gp.op == 'RENAME':
1375 1375 backend.unlink(gp.oldpath)
1376 1376 if not first_hunk:
1377 1377 if gp.op == 'DELETE':
1378 1378 backend.unlink(gp.path)
1379 1379 continue
1380 1380 data, mode = None, None
1381 1381 if gp.op in ('RENAME', 'COPY'):
1382 1382 data, mode = store.getfile(gp.oldpath)[:2]
1383 1383 if gp.mode:
1384 1384 mode = gp.mode
1385 1385 if gp.op == 'ADD':
1386 1386 # Added files without content have no hunk and
1387 1387 # must be created
1388 1388 data = ''
1389 1389 if data or mode:
1390 1390 if (gp.op in ('ADD', 'RENAME', 'COPY')
1391 1391 and backend.exists(gp.path)):
1392 1392 raise PatchError(_("cannot create %s: destination "
1393 1393 "already exists") % gp.path)
1394 1394 backend.setfile(gp.path, data, mode, gp.oldpath)
1395 1395 continue
1396 1396 try:
1397 1397 current_file = patcher(ui, gp, backend, store,
1398 1398 eolmode=eolmode)
1399 1399 except PatchError, inst:
1400 1400 ui.warn(str(inst) + '\n')
1401 1401 current_file = None
1402 1402 rejects += 1
1403 1403 continue
1404 1404 elif state == 'git':
1405 1405 for gp in values:
1406 1406 path = pstrip(gp.oldpath)
1407 1407 try:
1408 1408 data, mode = backend.getfile(path)
1409 1409 except IOError, e:
1410 1410 if e.errno != errno.ENOENT:
1411 1411 raise
1412 1412 # The error ignored here will trigger a getfile()
1413 1413 # error in a place more appropriate for error
1414 1414 # handling, and will not interrupt the patching
1415 1415 # process.
1416 1416 else:
1417 1417 store.setfile(path, data, mode)
1418 1418 else:
1419 1419 raise util.Abort(_('unsupported parser state: %s') % state)
1420 1420
1421 1421 if current_file:
1422 1422 rejects += current_file.close()
1423 1423
1424 1424 if rejects:
1425 1425 return -1
1426 1426 return err
1427 1427
1428 1428 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1429 1429 similarity):
1430 1430 """use <patcher> to apply <patchname> to the working directory.
1431 1431 returns whether patch was applied with fuzz factor."""
1432 1432
1433 1433 fuzz = False
1434 1434 args = []
1435 1435 cwd = repo.root
1436 1436 if cwd:
1437 1437 args.append('-d %s' % util.shellquote(cwd))
1438 1438 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1439 1439 util.shellquote(patchname)))
1440 1440 try:
1441 1441 for line in fp:
1442 1442 line = line.rstrip()
1443 1443 ui.note(line + '\n')
1444 1444 if line.startswith('patching file '):
1445 1445 pf = util.parsepatchoutput(line)
1446 1446 printed_file = False
1447 1447 files.add(pf)
1448 1448 elif line.find('with fuzz') >= 0:
1449 1449 fuzz = True
1450 1450 if not printed_file:
1451 1451 ui.warn(pf + '\n')
1452 1452 printed_file = True
1453 1453 ui.warn(line + '\n')
1454 1454 elif line.find('saving rejects to file') >= 0:
1455 1455 ui.warn(line + '\n')
1456 1456 elif line.find('FAILED') >= 0:
1457 1457 if not printed_file:
1458 1458 ui.warn(pf + '\n')
1459 1459 printed_file = True
1460 1460 ui.warn(line + '\n')
1461 1461 finally:
1462 1462 if files:
1463 1463 scmutil.marktouched(repo, files, similarity)
1464 1464 code = fp.close()
1465 1465 if code:
1466 1466 raise PatchError(_("patch command failed: %s") %
1467 1467 util.explainexit(code)[0])
1468 1468 return fuzz
1469 1469
1470 1470 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1471 1471 if files is None:
1472 1472 files = set()
1473 1473 if eolmode is None:
1474 1474 eolmode = ui.config('patch', 'eol', 'strict')
1475 1475 if eolmode.lower() not in eolmodes:
1476 1476 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1477 1477 eolmode = eolmode.lower()
1478 1478
1479 1479 store = filestore()
1480 1480 try:
1481 1481 fp = open(patchobj, 'rb')
1482 1482 except TypeError:
1483 1483 fp = patchobj
1484 1484 try:
1485 1485 ret = applydiff(ui, fp, backend, store, strip=strip,
1486 1486 eolmode=eolmode)
1487 1487 finally:
1488 1488 if fp != patchobj:
1489 1489 fp.close()
1490 1490 files.update(backend.close())
1491 1491 store.close()
1492 1492 if ret < 0:
1493 1493 raise PatchError(_('patch failed to apply'))
1494 1494 return ret > 0
1495 1495
1496 1496 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1497 1497 similarity=0):
1498 1498 """use builtin patch to apply <patchobj> to the working directory.
1499 1499 returns whether patch was applied with fuzz factor."""
1500 1500 backend = workingbackend(ui, repo, similarity)
1501 1501 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1502 1502
1503 1503 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1504 1504 eolmode='strict'):
1505 1505 backend = repobackend(ui, repo, ctx, store)
1506 1506 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1507 1507
1508 1508 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1509 1509 similarity=0):
1510 1510 """Apply <patchname> to the working directory.
1511 1511
1512 1512 'eolmode' specifies how end of lines should be handled. It can be:
1513 1513 - 'strict': inputs are read in binary mode, EOLs are preserved
1514 1514 - 'crlf': EOLs are ignored when patching and reset to CRLF
1515 1515 - 'lf': EOLs are ignored when patching and reset to LF
1516 1516 - None: get it from user settings, default to 'strict'
1517 1517 'eolmode' is ignored when using an external patcher program.
1518 1518
1519 1519 Returns whether patch was applied with fuzz factor.
1520 1520 """
1521 1521 patcher = ui.config('ui', 'patch')
1522 1522 if files is None:
1523 1523 files = set()
1524 1524 if patcher:
1525 1525 return _externalpatch(ui, repo, patcher, patchname, strip,
1526 1526 files, similarity)
1527 1527 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1528 1528 similarity)
1529 1529
1530 1530 def changedfiles(ui, repo, patchpath, strip=1):
1531 1531 backend = fsbackend(ui, repo.root)
1532 1532 fp = open(patchpath, 'rb')
1533 1533 try:
1534 1534 changed = set()
1535 1535 for state, values in iterhunks(fp):
1536 1536 if state == 'file':
1537 1537 afile, bfile, first_hunk, gp = values
1538 1538 if gp:
1539 1539 gp.path = pathstrip(gp.path, strip - 1)[1]
1540 1540 if gp.oldpath:
1541 1541 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1542 1542 else:
1543 1543 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1544 1544 changed.add(gp.path)
1545 1545 if gp.op == 'RENAME':
1546 1546 changed.add(gp.oldpath)
1547 1547 elif state not in ('hunk', 'git'):
1548 1548 raise util.Abort(_('unsupported parser state: %s') % state)
1549 1549 return changed
1550 1550 finally:
1551 1551 fp.close()
1552 1552
1553 1553 class GitDiffRequired(Exception):
1554 1554 pass
1555 1555
1556 1556 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1557 1557 def get(key, name=None, getter=ui.configbool):
1558 1558 return ((opts and opts.get(key)) or
1559 1559 getter(section, name or key, None, untrusted=untrusted))
1560 1560 return mdiff.diffopts(
1561 1561 text=opts and opts.get('text'),
1562 1562 git=get('git'),
1563 1563 nodates=get('nodates'),
1564 1564 nobinary=get('nobinary'),
1565 1565 showfunc=get('show_function', 'showfunc'),
1566 1566 ignorews=get('ignore_all_space', 'ignorews'),
1567 1567 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1568 1568 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1569 1569 context=get('unified', getter=ui.config))
1570 1570
1571 1571 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1572 1572 losedatafn=None, prefix=''):
1573 1573 '''yields diff of changes to files between two nodes, or node and
1574 1574 working directory.
1575 1575
1576 1576 if node1 is None, use first dirstate parent instead.
1577 1577 if node2 is None, compare node1 with working directory.
1578 1578
1579 1579 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1580 1580 every time some change cannot be represented with the current
1581 1581 patch format. Return False to upgrade to git patch format, True to
1582 1582 accept the loss or raise an exception to abort the diff. It is
1583 1583 called with the name of the current file being diffed as 'fn'. If set
1584 1584 to None, patches will always be upgraded to git format when
1585 1585 necessary.
1586 1586
1587 1587 prefix is a filename prefix that is prepended to all filenames on
1588 1588 display (used for subrepos).
1589 1589 '''
1590 1590
1591 1591 if opts is None:
1592 1592 opts = mdiff.defaultopts
1593 1593
1594 1594 if not node1 and not node2:
1595 1595 node1 = repo.dirstate.p1()
1596 1596
1597 1597 def lrugetfilectx():
1598 1598 cache = {}
1599 1599 order = util.deque()
1600 1600 def getfilectx(f, ctx):
1601 1601 fctx = ctx.filectx(f, filelog=cache.get(f))
1602 1602 if f not in cache:
1603 1603 if len(cache) > 20:
1604 1604 del cache[order.popleft()]
1605 1605 cache[f] = fctx.filelog()
1606 1606 else:
1607 1607 order.remove(f)
1608 1608 order.append(f)
1609 1609 return fctx
1610 1610 return getfilectx
1611 1611 getfilectx = lrugetfilectx()
1612 1612
1613 1613 ctx1 = repo[node1]
1614 1614 ctx2 = repo[node2]
1615 1615
1616 1616 if not changes:
1617 1617 changes = repo.status(ctx1, ctx2, match=match)
1618 1618 modified, added, removed = changes[:3]
1619 1619
1620 1620 if not modified and not added and not removed:
1621 1621 return []
1622 1622
1623 1623 revs = None
1624 1624 hexfunc = repo.ui.debugflag and hex or short
1625 revs = [hexfunc(node) for node in [node1, node2] if node]
1625 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
1626 1626
1627 1627 copy = {}
1628 1628 if opts.git or opts.upgrade:
1629 1629 copy = copies.pathcopies(ctx1, ctx2)
1630 1630
1631 1631 def difffn(opts, losedata):
1632 1632 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1633 1633 copy, getfilectx, opts, losedata, prefix)
1634 1634 if opts.upgrade and not opts.git:
1635 1635 try:
1636 1636 def losedata(fn):
1637 1637 if not losedatafn or not losedatafn(fn=fn):
1638 1638 raise GitDiffRequired
1639 1639 # Buffer the whole output until we are sure it can be generated
1640 1640 return list(difffn(opts.copy(git=False), losedata))
1641 1641 except GitDiffRequired:
1642 1642 return difffn(opts.copy(git=True), None)
1643 1643 else:
1644 1644 return difffn(opts, None)
1645 1645
1646 1646 def difflabel(func, *args, **kw):
1647 1647 '''yields 2-tuples of (output, label) based on the output of func()'''
1648 1648 headprefixes = [('diff', 'diff.diffline'),
1649 1649 ('copy', 'diff.extended'),
1650 1650 ('rename', 'diff.extended'),
1651 1651 ('old', 'diff.extended'),
1652 1652 ('new', 'diff.extended'),
1653 1653 ('deleted', 'diff.extended'),
1654 1654 ('---', 'diff.file_a'),
1655 1655 ('+++', 'diff.file_b')]
1656 1656 textprefixes = [('@', 'diff.hunk'),
1657 1657 ('-', 'diff.deleted'),
1658 1658 ('+', 'diff.inserted')]
1659 1659 head = False
1660 1660 for chunk in func(*args, **kw):
1661 1661 lines = chunk.split('\n')
1662 1662 for i, line in enumerate(lines):
1663 1663 if i != 0:
1664 1664 yield ('\n', '')
1665 1665 if head:
1666 1666 if line.startswith('@'):
1667 1667 head = False
1668 1668 else:
1669 1669 if line and line[0] not in ' +-@\\':
1670 1670 head = True
1671 1671 stripline = line
1672 1672 if not head and line and line[0] in '+-':
1673 1673 # highlight trailing whitespace, but only in changed lines
1674 1674 stripline = line.rstrip()
1675 1675 prefixes = textprefixes
1676 1676 if head:
1677 1677 prefixes = headprefixes
1678 1678 for prefix, label in prefixes:
1679 1679 if stripline.startswith(prefix):
1680 1680 yield (stripline, label)
1681 1681 break
1682 1682 else:
1683 1683 yield (line, '')
1684 1684 if line != stripline:
1685 1685 yield (line[len(stripline):], 'diff.trailingwhitespace')
1686 1686
1687 1687 def diffui(*args, **kw):
1688 1688 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1689 1689 return difflabel(diff, *args, **kw)
1690 1690
1691 1691 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1692 1692 copy, getfilectx, opts, losedatafn, prefix):
1693 1693
1694 1694 def join(f):
1695 1695 return posixpath.join(prefix, f)
1696 1696
1697 1697 def addmodehdr(header, omode, nmode):
1698 1698 if omode != nmode:
1699 1699 header.append('old mode %s\n' % omode)
1700 1700 header.append('new mode %s\n' % nmode)
1701 1701
1702 1702 def addindexmeta(meta, revs):
1703 1703 if opts.git:
1704 1704 i = len(revs)
1705 1705 if i==2:
1706 1706 meta.append('index %s..%s\n' % tuple(revs))
1707 1707 elif i==3:
1708 1708 meta.append('index %s,%s..%s\n' % tuple(revs))
1709 1709
1710 1710 def gitindex(text):
1711 1711 if not text:
1712 1712 text = ""
1713 1713 l = len(text)
1714 1714 s = util.sha1('blob %d\0' % l)
1715 1715 s.update(text)
1716 1716 return s.hexdigest()
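    # For reference, the empty blob hashes to the well-known git value:
    # gitindex('') == util.sha1('blob 0\0').hexdigest()
    #              == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'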
1717 1717
1718 1718 def diffline(a, b, revs):
1719 1719 if opts.git:
1720 1720 line = 'diff --git a/%s b/%s\n' % (a, b)
1721 1721 elif not repo.ui.quiet:
1722 1722 if revs:
1723 1723 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1724 1724 line = 'diff %s %s\n' % (revinfo, a)
1725 1725 else:
1726 1726 line = 'diff %s\n' % a
1727 1727 else:
1728 1728 line = ''
1729 1729 return line
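    # For example (hashes below are placeholders), git mode emits
    #   'diff --git a/foo.c b/foo.c\n'
    # while the plain format, given both revisions in revs, emits
    #   'diff -r 111111111111 -r 222222222222 foo.c\n'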
1730 1730
1731 1731 date1 = util.datestr(ctx1.date())
1732 1732 man1 = ctx1.manifest()
1733 1733
1734 1734 gone = set()
1735 1735 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1736 1736
1737 1737 copyto = dict([(v, k) for k, v in copy.items()])
1738 1738
1739 1739 if opts.git:
1740 1740 revs = None
1741 1741
1742 1742 for f in sorted(modified + added + removed):
1743 1743 to = None
1744 1744 tn = None
1745 1745 dodiff = True
1746 1746 header = []
1747 1747 if f in man1:
1748 1748 to = getfilectx(f, ctx1).data()
1749 1749 if f not in removed:
1750 1750 tn = getfilectx(f, ctx2).data()
1751 1751 a, b = f, f
1752 1752 if opts.git or losedatafn:
1753 1753 if f in added or (f in modified and to is None):
1754 1754 mode = gitmode[ctx2.flags(f)]
1755 1755 if f in copy or f in copyto:
1756 1756 if opts.git:
1757 1757 if f in copy:
1758 1758 a = copy[f]
1759 1759 else:
1760 1760 a = copyto[f]
1761 1761 omode = gitmode[man1.flags(a)]
1762 1762 addmodehdr(header, omode, mode)
1763 1763 if a in removed and a not in gone:
1764 1764 op = 'rename'
1765 1765 gone.add(a)
1766 1766 else:
1767 1767 op = 'copy'
1768 1768 header.append('%s from %s\n' % (op, join(a)))
1769 1769 header.append('%s to %s\n' % (op, join(f)))
1770 1770 to = getfilectx(a, ctx1).data()
1771 1771 else:
1772 1772 losedatafn(f)
1773 1773 else:
1774 1774 if opts.git:
1775 1775 header.append('new file mode %s\n' % mode)
1776 1776 elif ctx2.flags(f):
1777 1777 losedatafn(f)
1778 1778 # In theory, if tn was copied or renamed we should check
1779 1779 # if the source is binary too but the copy record already
1780 1780 # forces git mode.
1781 1781 if util.binary(tn):
1782 1782 if opts.git:
1783 1783 dodiff = 'binary'
1784 1784 else:
1785 1785 losedatafn(f)
1786 1786 if not opts.git and not tn:
1787 1787 # regular diffs cannot represent new empty file
1788 1788 losedatafn(f)
1789 1789 elif f in removed or (f in modified and tn is None):
1790 1790 if opts.git:
1791 1791 # have we already reported a copy above?
1792 1792 if ((f in copy and copy[f] in added
1793 1793 and copyto[copy[f]] == f) or
1794 1794 (f in copyto and copyto[f] in added
1795 1795 and copy[copyto[f]] == f)):
1796 1796 dodiff = False
1797 1797 else:
1798 1798 header.append('deleted file mode %s\n' %
1799 1799 gitmode[man1.flags(f)])
1800 1800 if util.binary(to):
1801 1801 dodiff = 'binary'
1802 1802 elif not to or util.binary(to):
1803 1803 # regular diffs cannot represent empty file deletion
1804 1804 losedatafn(f)
1805 1805 else:
1806 1806 oflag = man1.flags(f)
1807 1807 nflag = ctx2.flags(f)
1808 1808 binary = util.binary(to) or util.binary(tn)
1809 1809 if opts.git:
1810 1810 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1811 1811 if binary:
1812 1812 dodiff = 'binary'
1813 1813 elif binary or nflag != oflag:
1814 1814 losedatafn(f)
1815 1815
1816 1816 if dodiff:
1817 1817 if opts.git or revs:
1818 1818 header.insert(0, diffline(join(a), join(b), revs))
1819 1819 if dodiff == 'binary' and not opts.nobinary:
1820 1820 text = mdiff.b85diff(to, tn)
1821 1821 if text:
1822 1822 addindexmeta(header, [gitindex(to), gitindex(tn)])
1823 1823 else:
1824 1824 text = mdiff.unidiff(to, date1,
1825 1825 # ctx2 date may be dynamic
1826 1826 tn, util.datestr(ctx2.date()),
1827 1827 join(a), join(b), opts=opts)
1828 1828 if header and (text or len(header) > 1):
1829 1829 yield ''.join(header)
1830 1830 if text:
1831 1831 yield text
1832 1832
1833 1833 def diffstatsum(stats):
1834 1834 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1835 1835 for f, a, r, b in stats:
1836 1836 maxfile = max(maxfile, encoding.colwidth(f))
1837 1837 maxtotal = max(maxtotal, a + r)
1838 1838 addtotal += a
1839 1839 removetotal += r
1840 1840 binary = binary or b
1841 1841
1842 1842 return maxfile, maxtotal, addtotal, removetotal, binary
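    # Worked example (made-up stats): for
    #   [('a.txt', 3, 1, False), ('b.bin', 0, 0, True)]
    # this returns (5, 4, 3, 1, True): the widest filename takes 5 columns,
    # the busiest file touches 3 + 1 = 4 lines, 3 lines were added and 1
    # removed overall, and at least one entry is binary.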
1843 1843
1844 1844 def diffstatdata(lines):
1845 1845 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1846 1846
1847 1847 results = []
1848 1848 filename, adds, removes, isbinary = None, 0, 0, False
1849 1849
1850 1850 def addresult():
1851 1851 if filename:
1852 1852 results.append((filename, adds, removes, isbinary))
1853 1853
1854 1854 for line in lines:
1855 1855 if line.startswith('diff'):
1856 1856 addresult()
1857 1857 # set numbers to 0 anyway when starting new file
1858 1858 adds, removes, isbinary = 0, 0, False
1859 1859 if line.startswith('diff --git a/'):
1860 1860 filename = gitre.search(line).group(2)
1861 1861 elif line.startswith('diff -r'):
1862 1862 # format: "diff -r ... -r ... filename"
1863 1863 filename = diffre.search(line).group(1)
1864 1864 elif line.startswith('+') and not line.startswith('+++ '):
1865 1865 adds += 1
1866 1866 elif line.startswith('-') and not line.startswith('--- '):
1867 1867 removes += 1
1868 1868 elif (line.startswith('GIT binary patch') or
1869 1869 line.startswith('Binary file')):
1870 1870 isbinary = True
1871 1871 addresult()
1872 1872 return results
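    # Illustrative input (a minimal made-up git diff):
    #   diff --git a/foo.c b/foo.c
    #   --- a/foo.c
    #   +++ b/foo.c
    #   @@ -1,2 +1,3 @@
    #   -old line
    #   +new line
    #   +another new line
    # yields [('foo.c', 2, 1, False)]: two additions, one removal, not binary.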
1873 1873
1874 1874 def diffstat(lines, width=80, git=False):
1875 1875 output = []
1876 1876 stats = diffstatdata(lines)
1877 1877 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1878 1878
1879 1879 countwidth = len(str(maxtotal))
1880 1880 if hasbinary and countwidth < 3:
1881 1881 countwidth = 3
1882 1882 graphwidth = width - countwidth - maxname - 6
1883 1883 if graphwidth < 10:
1884 1884 graphwidth = 10
1885 1885
1886 1886 def scale(i):
1887 1887 if maxtotal <= graphwidth:
1888 1888 return i
1889 1889 # If diffstat runs out of room it doesn't print anything,
1890 1890 # which isn't very useful, so always print at least one + or -
1891 1891 # if there were at least some changes.
1892 1892 return max(i * graphwidth // maxtotal, int(bool(i)))
1893 1893
1894 1894 for filename, adds, removes, isbinary in stats:
1895 1895 if isbinary:
1896 1896 count = 'Bin'
1897 1897 else:
1898 1898 count = adds + removes
1899 1899 pluses = '+' * scale(adds)
1900 1900 minuses = '-' * scale(removes)
1901 1901 output.append(' %s%s | %*s %s%s\n' %
1902 1902 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1903 1903 countwidth, count, pluses, minuses))
1904 1904
1905 1905 if stats:
1906 1906 output.append(_(' %d files changed, %d insertions(+), '
1907 1907 '%d deletions(-)\n')
1908 1908 % (len(stats), totaladds, totalremoves))
1909 1909
1910 1910 return ''.join(output)
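    # With a single text file carrying 3 insertions and 1 deletion the
    # output looks roughly like (widths depend on the other entries):
    #    foo.c | 4 +++-
    #    1 files changed, 3 insertions(+), 1 deletions(-)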
1911 1911
1912 1912 def diffstatui(*args, **kw):
1913 1913 '''like diffstat(), but yields 2-tuples of (output, label) for
1914 1914 ui.write()
1915 1915 '''
1916 1916
1917 1917 for line in diffstat(*args, **kw).splitlines():
1918 1918 if line and line[-1] in '+-':
1919 1919 name, graph = line.rsplit(' ', 1)
1920 1920 yield (name + ' ', '')
1921 1921 m = re.search(r'\++', graph)
1922 1922 if m:
1923 1923 yield (m.group(0), 'diffstat.inserted')
1924 1924 m = re.search(r'-+', graph)
1925 1925 if m:
1926 1926 yield (m.group(0), 'diffstat.deleted')
1927 1927 else:
1928 1928 yield (line, '')
1929 1929 yield ('\n', '')
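
# Sketch of a consumer (hypothetical helper, assuming `lines` already holds
# the diff text): the pairs from diffstatui() go straight to ui.write(), the
# same way the diffui() output does.
def _examplewritediffstat(ui, lines):
    for chunk, label in diffstatui(lines, width=ui.termwidth()):
        ui.write(chunk, label=label)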