##// END OF EJS Templates
diff: pass the diff matcher to the copy logic...
Durham Goode -
r24783:a7f8e358 default
parent child Browse files
Show More
@@ -1,2464 +1,2464 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email, os, errno, re, posixpath, copy
10 10 import tempfile, zlib, shutil
11 11 # On python2.4 you have to import these by name or they fail to
12 12 # load. This was not a problem on Python 2.7.
13 13 import email.Generator
14 14 import email.Parser
15 15
16 16 from i18n import _
17 17 from node import hex, short
18 18 import cStringIO
19 19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
20 20 import pathutil
21 21
22 22 gitre = re.compile('diff --git a/(.*) b/(.*)')
23 23 tabsplitter = re.compile(r'(\t+|[^\t]+)')
24 24
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
27 27
28 28
29 29 # public functions
30 30
def split(stream):
    '''return an iterator of individual patches from a stream

    Detects the container format (hg export, mbox, MIME message, plain
    headers, or a bare diff) from the first lines read and dispatches to
    the matching splitter.  Each yielded item is a file-like object
    holding one patch.
    '''
    def isheader(line, inheader):
        # True if 'line' looks like an RFC 2822 header line, or a
        # continuation line when we are already inside a header block
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap accumulated lines into a file-like object
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers; a marker only starts
        # a new patch once the current header block has ended
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on 'From ' separators, recursing into split()
        # for each message body (the separator line itself is dropped)
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email parser handle MIME structure, yielding every
        # text part that may contain a patch
        def msgfp(m):
            # flatten a message object back into a file-like object
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # non-MIME input: start a new patch at each new header block
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no recognizable structure: the whole input is one patch
        yield chunk(cur)

    class fiter(object):
        # iterator adapter for file-like objects that only provide
        # readline() (e.g. http responses)
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # read ahead until the container format can be identified, then hand
    # the already-buffered lines plus the rest of the stream to a splitter
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
157 157
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    # the diff body is spooled to a temp file whose name is returned
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading '[PATCH n/m]' style tag
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            # unfold continuation lines into a single-line subject
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # text before the diff start is the commit message;
                # '# HG changeset patch' headers override email headers
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---':
                        # '---' separator: everything after is not message
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file on any failure before re-raising
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        # no patch found: discard the temp file, keep the message
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None

    if parents:
        p1 = parents.pop(0)
    else:
        p1 = None

    if parents:
        p2 = parents.pop(0)
    else:
        p2 = None

    return tmpname, message, user, date, branch, nodeid, p1, p2
276 276
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode an st_mode-style integer (Python 2 octal literals):
        # 020000 is the symlink bit, 0100 the owner-execute bit; the
        # resulting tuple members are nonzero ints when the bit is set
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def copy(self):
        # shallow duplicate of this metadata record
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # does 'afile' (the '---' side of a hunk) refer to this file?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does 'bfile' (the '+++' side of a hunk) refer to this file?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # True if a hunk with the given a/b names targets this file
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
322 322
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    'lr' is an iterable of patch lines.  Returns a list of patchmeta
    objects, one per 'diff --git' section encountered.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new file section starts; flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # hunks begin: metadata for this file is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # last six characters are the octal file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
366 366
class linereader(object):
    """File-object wrapper supporting pushing lines back onto the
    input stream (simple unreading via an internal queue)."""
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # queue 'line' to be returned by the next readline() call
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # pushed-back lines are served first, oldest first
        if not self.buf:
            return self.fp.readline()
        head = self.buf.pop(0)
        return head

    def __iter__(self):
        # iterate lines until EOF (empty string)
        line = self.readline()
        while line:
            yield line
            line = self.readline()
class abstractbackend(object):
    """Interface for patch application targets.

    Concrete subclasses provide file access for a particular
    destination (working directory, repository store, ...).
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return a (data, (islink, isexec)) tuple for the target file.

        data is None when the file is missing or deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to the target file and apply mode, an
        (islink, isexec) tuple.  A data of None means "leave content
        unchanged".  copysource names the origin file when the target
        was modified after being copied, and is None otherwise.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Remove the target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Record rejected hunk lines for fname; 'failed' hunks out of
        'total' could not be applied.  Default implementation discards
        them.
        """
        pass

    def exists(self, fname):
        """Return True if the target file exists."""
        raise NotImplementedError
422 422
class fsbackend(abstractbackend):
    """Patch backend reading and writing files relative to a base
    directory on the local filesystem."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        # absolute path of f under the backend root
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # a symlink's "data" is its target, flagged islink=True
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            # 0100 (Python 2 octal) is the owner-execute bit
            isexec = self.opener.lstat(fname).st_mode & 0100 != 0
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            # missing file is signalled as (None, None), not an error
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only update the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        # unlinking an already-absent file is not an error here
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # rejects are saved next to the target as '<fname>.rej'
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
474 474
class workingbackend(fsbackend):
    """Filesystem backend that also keeps the repository dirstate in
    sync with the files it patches (copies, removals, renames)."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files present on disk but unknown to hg
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush dirstate bookkeeping; return the sorted changed files."""
        wctx = self.repo[None]
        touched = set(self.changed)
        for source, dest in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, source, dest)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for fname in self.removed:
                if fname in self.repo.dirstate:
                    continue
                # Deleted and already gone from the dirstate: it was
                # probably marked added then deleted, and should not be
                # considered by marktouched().
                touched.discard(fname)
        if touched:
            scmutil.marktouched(self.repo, touched, self.similarity)
        return sorted(self.changed)
518 518
class filestore(object):
    """Accumulate patched file contents, keeping them in memory up to
    'maxsize' bytes in total and spilling to a temporary directory
    beyond that (a negative maxsize disables spilling)."""
    def __init__(self, maxsize=None):
        if maxsize is None:
            maxsize = 4 * (2 ** 20)   # default in-memory budget: 4MB
        self.maxsize = maxsize
        self.opener = None            # lazily-created temp dir opener
        self.files = {}               # fname -> (tempname, mode, copied)
        self.created = 0              # counter for simple temp names
        self.size = 0                 # bytes currently held in memory
        self.data = {}                # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        spill = self.maxsize >= 0 and (len(data) + self.size) > self.maxsize
        if not spill:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        # in-memory entries take precedence; unknown names give Nones
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # drop the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
555 555
class repobackend(abstractbackend):
    """Backend applying patches against a repository changectx, staging
    resulting contents in a filestore instead of the working directory."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # only files known to the base context may be patched
        if fname in self.ctx:
            return
        raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        islink = 'l' in flags
        isexec = 'x' in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: keep the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # every file the patch touched, modified or removed
        return self.changed | self.removed
597 597
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range lines: '*** start,end ****' / '--- start,end ----'
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes for patching
eolmodes = ['strict', 'crlf', 'lf', 'auto']
602 602
class patchfile(object):
    """State and logic for applying the hunks of a patch to one file.

    Initial content comes from 'backend' (normal case) or, for
    copies/renames, from 'store' under gp.oldpath.  Hunks are applied
    in memory by apply() and flushed by close(), which also writes the
    .rej file for rejected hunks and returns their count.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None                 # EOL style detected from first line
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath    # read content from here for COPY/RENAME
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                # file is being created: a missing source is expected
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}          # line content -> line numbers, for fuzzing
        self.dirty = 0          # truthy once a hunk modified self.lines
        self.offset = 0         # net line delta from applied hunks
        self.skew = 0           # displacement discovered by fuzzy matching
        self.rej = []           # hunks that could not be applied
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        # pick the output EOL; 'auto' reuses the detected input EOL
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            # convert normalized '\n' endings back to the target EOL
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # emit "patching file X" once; a warning forces it out
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h in memory.

        Returns 0 on clean application, the fuzz amount when fuzzing
        was required, or -1 when the hunk is rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            # creating a file that is already present cannot succeed
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace content wholesale, no fuzzing
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # try increasing fuzz, trimming context from the top only first,
        # then from both ends
        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # flush modified content, write rejects, report reject count
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
817 817
class header(object):
    """One file header of a parsed patch, plus its hunks."""
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|new|deleted|copy|rename) ')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # git binary diffs carry an 'index ' line
        return util.any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # binary or deletion headers force all-or-nothing selection
        return util.any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        # a single-element list for git diffs of an unrenamed file,
        # a two-element list for renames/copies, a tuple for -r diffs
        gitmatch = self.diffgit_re.match(self.header[0])
        if not gitmatch:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = gitmatch.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def special(self):
        return util.any(self.special_re.match(h) for h in self.header)
875 875
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            # Context trimming is deliberately disabled (note the
            # 'False' guard below); the code is kept for reference.
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        key = (tuple(self.hunk),
               tuple(self.header.files()),
               self.fromline,
               self.proc)
        return hash(key)

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for h in hunk if h[0] == '+')
        rem = sum(1 for h in hunk if h[0] == '-')
        return add, rem

    def write(self, fp):
        # range lengths count context lines, minus a trailing
        # "no newline" marker which is not a real line
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        procsuffix = self.proc and (' ' + self.proc)
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen, procsuffix))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
936 936
def filterpatch(ui, headers):
    """Interactively filter patch chunks into applied-only chunks

    Prompts the user per file header and per hunk; returns a flat list of
    headers/hunks that were accepted (special headers, or headers with at
    least one accepted hunk).
    """

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # a previously-recorded blanket answer short-circuits the prompt
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            # '$$'-separated choice string consumed by ui.promptchoice
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # http://mercurial.selenic.com/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ui.system("%s \"%s\"" % (editor, patchfn),
                          environ={'HGUSER': ui.username()},
                          onerr=util.Abort, errprefix=_("edit failed"))
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    # re-parse the edited text into fresh header/hunk objects
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        # per-file answer resets for every new header; skipall persists
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # e.g. binary or creation/deletion: all-or-nothing
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    # copy before mutating toline: the original hunk object
                    # may be shared
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # a skipped hunk shifts the target lines of later hunks
                fixoffset += chunk.removed - chunk.added
    return sum([h for h in applied.itervalues()
               if h[0].special() or len(h) > 1], [])
class hunk(object):
    """A single hunk parsed from a unified or context diff.

    'a' holds the old-side lines ('-' or ' ' prefixed), 'b' the new-side
    lines, and 'hunk' the raw hunk text including the descriptor line.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num          # 1-based hunk index, used in error messages
        self.desc = desc           # the '@@ ...' or '*** ...' descriptor line
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        # lr is None when building a dummy hunk (see getnormalized)
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # parse the '@@ -start,len +start,len @@' descriptor
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length (e.g. '@@ -1 +1 @@') means 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # context diffs carry the range on the line after '***************'
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side block ('- ', '! ' or '  ' prefixed lines)
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new-side block, merging it into self.hunk in order
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # align this new-side line with the unified-style hunk built
            # so far, skipping over '-' lines already recorded
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides contain exactly the announced line counts
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        # return (old lines, old start, new lines, new start) after fuzzing
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1305 1305
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None          # decoded payload; None until _read succeeds
        self.delta = False        # True for 'delta' (vs 'literal') payloads
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            # payload is a git binary delta against the old content
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one line, recording the raw text in the hunk
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count of the line:
            # 'A'-'Z' -> 1-26, 'a'-'z' -> 27-52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1361 1361
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' diff header line.

    The 4-character marker and the line ending are stripped; anything
    after the first tab (or, failing that, the first space) is dropped.
    """
    # --- filename \t|space stuff
    name = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1371 1371
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0       # current old-file line number
            self.toline = 0         # current new-file line number
            self.proc = ''
            self.header = None      # header currently collecting hunks
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # trailing context flushes the pending hunk, if any
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
                self.proc = ''
            self.context = context

        def addhunk(self, hunk):
            # context seen so far becomes this hunk's leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk before returning
            self.addcontext([])
            return self.headers

        # state -> {event -> handler}; handlers are unbound, called with
        # the parser instance explicitly below
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = cStringIO.StringIO()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1455 1455
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    pathlen = len(path)
    pos = 0
    # advance 'pos' past one path component per requested strip level
    for done in range(strip):
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - done, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < pathlen - 1 and path[pos] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1493 1493
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta (target file name plus ADD/DELETE op) for a
    plain, non-git hunk, guessing the target from the --- / +++ names."""
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a 0,0 range against /dev/null means creation/removal of the file
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the side that is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1548 1548
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                # not ours: push back for the outer loop
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            # append the ---/+++ pair to the header when present
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1600 1600
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer it all in memory
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the main parser re-reads the patch from where it was
    fp.seek(pos)
    return gitpatches
1626 1626
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown yet, True = context diff, False = unified
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunk-less metadata entries preceding this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git entries (e.g. mode changes)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1725 1725
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    'binchunk' is the delta: two varint size headers followed by a
    stream of copy commands (high bit set: copy offset/size bytes out
    of 'data') and literal insertions (low 7 bits give the byte count).
    Returns the reconstructed target content as a string.
    """
    def deltahead(binchunk):
        # length of a little-endian base-128 varint: every byte with the
        # high bit set is followed by another byte
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # collect output fragments and join once at the end: repeated
    # 'out += ...' string concatenation is quadratic in the worst case
    out = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]     # skip the source size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]     # skip the target size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy command: bits 0-3 select offset bytes, bits 4-6 size
            # bytes, each little-endian and present only if its bit is set
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # git encodes a 64kB copy as size 0
                size = 0x10000
            offset_end = offset + size
            out.append(data[offset:offset_end])
        elif cmd != 0:
            # literal insertion of the next 'cmd' bytes from the delta
            offset_end = i + cmd
            out.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(out)
1781 1781
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to _applydiff with the default 'patchfile' patcher factory
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1794 1794
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Drive iterhunks() events against 'backend', applying hunks with
    instances produced by the 'patcher' factory.

    Returns 0 on success, 1 if any hunk was applied with fuzz, -1 if any
    file was rejected.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip - 1: git metadata paths lack the a/ b/ leading component
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: infer the metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: no hunks will follow for this file
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash copy/rename sources before they are modified in place
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1882 1882
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                       util.shellquote(patchname)))
    try:
        # scrape the external patch program's output for progress/errors
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1924 1924
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' (a file name or an open file object) through
    'backend'. Returns whether the patch applied with fuzz; raises
    PatchError if it failed to apply."""
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be a path or an already-open file-like object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1951 1951
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory using the builtin patcher.

    Returns whether the patch was applied with fuzz factor."""
    wbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wbackend, patchobj, strip, prefix, files,
                        eolmode)
1958 1958
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> to repository data via a repobackend bound to ctx."""
    rbackend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rbackend, patchobj, strip, prefix, files,
                        eolmode)
1963 1963
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a patch tool configured in [ui] takes precedence over the builtin one
    patcher = ui.config('ui', 'patch')
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
1985 1985
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file names touched by the patch at patchpath."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                # hunk contents and git metadata carry no new file names
                continue
            if state != 'file':
                raise util.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                # strip leading path components from the git metadata
                gp.path = pathtransform(gp.path, strip - 1, '')[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                               '')[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                   strip, '')
            changed.add(gp.path)
            if gp.op == 'RENAME':
                changed.add(gp.oldpath)
        return changed
    finally:
        fp.close()
2009 2009
class GitDiffRequired(Exception):
    """Internal signal: a change can only be represented in git diff format.

    Raised while generating a plain diff when losedatafn refuses the data
    loss (see diff() in this module); the caller then retries in git mode.
    """
    pass
2012 2012
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# legacy alias kept so existing callers of 'diffopts' keep working
diffopts = diffallopts
2019 2019
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # a truthy command-line option wins over the config file
        if opts:
            v = opts.get(key)
            if v:
                return v
        # HGPLAIN mode may pin some options to a fixed value
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts.update({
            'ignorews': get('ignore_all_space', 'ignorews'),
            'ignorewsamount': get('ignore_space_change', 'ignorewsamount'),
            'ignoreblanklines': get('ignore_blank_lines', 'ignoreblanklines'),
        })
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
2060 2060
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs keyed by file name (max 20 entries)
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        # pass the matcher so copy detection can ignore irrelevant files
        # (the unfiltered call was a stale leftover and has been dropped)
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2162 2162
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefixes identifying lines that belong to a diff header block
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # prefixes identifying lines inside a hunk
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newline consumed by split()
                yield ('\n', '')
            if head:
                # a hunk marker ends the header block
                if line.startswith('@'):
                    head = False
            else:
                # any line that is not hunk content starts a new header
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # emit tab runs with their own label so they can
                        # be styled separately
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                # whitespace stripped above still has to be emitted
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2214 2214
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # difflabel() drives diff() and classifies each output line
    return difflabel(diff, *args, **kw)
2218 2218
def _filepairs(ctx1, modified, added, removed, copy, opts):
    '''yield (f1, f2, copyop) for every changed file

    f1 is the name of the file before the change and f2 the name after.
    f1 is None for added files; f2 is None for removed files. copyop is
    None, 'copy' or 'rename' (the last two only when opts.git is set).'''
    renamedsources = set()

    # reverse mapping of the copy dict: source -> destination
    copyto = dict((src, dst) for dst, src in copy.items())

    addedset = set(added)
    removedset = set(removed)
    # merged-in additions show up as modifications, so treat any
    # "modified" file missing from ctx1 as added instead
    addedset.update(f for f in modified if f not in ctx1)

    for f in sorted(modified + added + removed):
        copyop = None
        f1 = f2 = f
        if f in addedset:
            f1 = None
            if f in copy and opts.git:
                f1 = copy[f]
                if f1 in removedset and f1 not in renamedsources:
                    copyop = 'rename'
                    renamedsources.add(f1)
                else:
                    copyop = 'copy'
        elif f in removedset:
            f2 = None
            if (opts.git and f in copyto and copyto[f] in addedset
                and copy[copyto[f]] == f):
                # this removal was already reported as a rename source
                continue
        yield f1, f2, copyop
2256 2256
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # blob id the way git computes it: sha1 over a "blob <len>\0"
        # header followed by the content
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) "diff -r REV [-r REV] file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # file flag -> git mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: with a relative root, every input path must
    # already live under it (callers are expected to pre-filter)
    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        # in plain mode, give losedatafn a chance to veto anything a
        # non-git diff cannot represent
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # display paths: strip relroot, prepend prefix
        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # suppress a bare one-line header with no hunks to go with it
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2367 2367
def diffstatsum(stats):
    '''summarize diffstat entries

    stats is an iterable of (filename, added, removed, isbinary) tuples;
    returns (maxfile, maxtotal, addtotal, removetotal, binary).'''
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        total = adds + removes
        if total > maxtotal:
            maxtotal = total
        addtotal += adds
        removetotal += removes
        if isbinary:
            binary = True

    return maxfile, maxtotal, addtotal, removetotal, binary
2378 2378
def diffstatdata(lines):
    '''parse diff output into per-file statistics

    Returns a list of (filename, adds, removes, isbinary) tuples, one
    per file mentioned in lines.'''
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def flush():
        # record the file collected so far, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            flush()
            # reset the counters when a new file starts
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch')
              or line.startswith('Binary file')):
            isbinary = True
    flush()
    return results
2408 2408
def diffstat(lines, width=80, git=False):
    '''render diff statistics for lines of diff output as a text table'''
    parts = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the 'Bin' marker used for binary files
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        parts.append(' %s%s | %*s %s%s\n' %
                     (filename, padding, countwidth, count, pluses, minuses))

    if stats:
        parts.append(_(' %d files changed, %d insertions(+), '
                       '%d deletions(-)\n')
                     % (len(stats), totaladds, totalremoves))

    return ''.join(parts)
2446 2446
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # header/summary lines carry no specific label
            yield (line, '')
        else:
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            inserted = re.search(r'\++', graph)
            if inserted:
                yield (inserted.group(0), 'diffstat.inserted')
            deleted = re.search(r'-+', graph)
            if deleted:
                yield (deleted.group(0), 'diffstat.deleted')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now