##// END OF EJS Templates
patch: pass in context objects into diffhunks() (API)...
Martin von Zweigbergk -
r41767:e834f6f6 default
parent child Browse files
Show More
@@ -1,815 +1,813 b''
1 1 # hgweb/webutil.py - utility library for the web interface.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import difflib
13 13 import os
14 14 import re
15 15
16 16 from ..i18n import _
17 17 from ..node import hex, nullid, short
18 18
19 19 from .common import (
20 20 ErrorResponse,
21 21 HTTP_BAD_REQUEST,
22 22 HTTP_NOT_FOUND,
23 23 paritygen,
24 24 )
25 25
26 26 from .. import (
27 27 context,
28 28 diffutil,
29 29 error,
30 30 match,
31 31 mdiff,
32 32 obsutil,
33 33 patch,
34 34 pathutil,
35 35 pycompat,
36 36 scmutil,
37 37 templatefilters,
38 38 templatekw,
39 39 templateutil,
40 40 ui as uimod,
41 41 util,
42 42 )
43 43
44 44 from ..utils import (
45 45 stringutil,
46 46 )
47 47
# Map of archive type -> (content-type, archive kind key, file extension,
# extra opener args) used when serving repository archive downloads.
archivespecs = util.sortdict((
    ('zip', ('application/zip', 'zip', '.zip', None)),
    ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
    ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)),
))
53 53
def archivelist(ui, nodeid, url=None):
    """Return a mappinglist of archive-download entries enabled in config."""
    allowed = ui.configlist('web', 'allow-archive', untrusted=True)

    def enabled(typ):
        # either listed in web.allow-archive or via the legacy web.allow<typ>
        return typ in allowed or ui.configbool('web', 'allow' + typ,
                                               untrusted=True)

    entries = [{
        'type': typ,
        'extension': spec[2],
        'node': nodeid,
        'url': url,
    } for typ, spec in archivespecs.iteritems() if enabled(typ)]

    return templateutil.mappinglist(entries)
69 69
def up(p):
    """Return the parent directory of path *p*, with a trailing slash."""
    # normalize: ensure a leading slash, then drop any trailing slash
    normalized = p if p[0:1] == "/" else "/" + p
    if normalized[-1:] == "/":
        normalized = normalized[:-1]
    parent = os.path.dirname(normalized)
    return "/" if parent == "/" else parent + "/"
79 79
80 80 def _navseq(step, firststep=None):
81 81 if firststep:
82 82 yield firststep
83 83 if firststep >= 20 and firststep <= 40:
84 84 firststep = 50
85 85 yield firststep
86 86 assert step > 0
87 87 assert firststep > 0
88 88 while step <= firststep:
89 89 step *= 10
90 90 while True:
91 91 yield 1 * step
92 92 yield 3 * step
93 93 step *= 10
94 94
class revnav(object):
    """Build the changelog navigation links ('(first)', '-100', '+100', 'tip')."""

    def __init__(self, repo):
        """Navigation generation object

        :repo: repo object we generate nav for
        """
        # used for hex generation
        self._revlog = repo.changelog

    def __nonzero__(self):
        """return True if any revision to navigate over"""
        return self._first() is not None

    # Python 3 name of the truth-value hook
    __bool__ = __nonzero__

    def _first(self):
        """return the minimum non-filtered changeset or None"""
        try:
            return next(iter(self._revlog))
        except StopIteration:
            # the revlog is empty
            return None

    def hex(self, rev):
        # hex node id used as the link target for *rev*
        return hex(self._revlog.node(rev))

    def gen(self, pos, pagelen, limit):
        """computes label and revision id for navigation link

        :pos: is the revision relative to which we generate navigation.
        :pagelen: the size of each navigation page
        :limit: how far shall we link

        The return is:
            - a single element mappinglist
            - containing a dictionary with a `before` and `after` key
            - values are dictionaries with `label` and `node` keys
        """
        if not self:
            # empty repo
            return templateutil.mappinglist([
                {'before': templateutil.mappinglist([]),
                 'after': templateutil.mappinglist([])},
            ])

        # candidate revisions at increasing distances on both sides of pos
        targets = []
        for f in _navseq(1, pagelen):
            if f > limit:
                break
            targets.append(pos + f)
            targets.append(pos - f)
        targets.sort()

        first = self._first()
        navbefore = [{'label': '(%i)' % first, 'node': self.hex(first)}]
        navafter = []
        for rev in targets:
            # skip candidates that fall outside (or are filtered from)
            # the revlog
            if rev not in self._revlog:
                continue
            if pos < rev < limit:
                navafter.append({'label': '+%d' % abs(rev - pos),
                                 'node': self.hex(rev)})
            if 0 < rev < pos:
                navbefore.append({'label': '-%d' % abs(rev - pos),
                                  'node': self.hex(rev)})

        navafter.append({'label': 'tip', 'node': 'tip'})

        # TODO: maybe this can be a scalar object supporting tomap()
        return templateutil.mappinglist([
            {'before': templateutil.mappinglist(navbefore),
             'after': templateutil.mappinglist(navafter)},
        ])
168 168
class filerevnav(revnav):
    """Navigation over a single file's filelog, labeled by changelog nodes."""

    def __init__(self, repo, path):
        """Navigation generation object

        :repo: repo object we generate nav for
        :path: path of the file we generate nav for
        """
        # used for iteration
        self._changelog = repo.unfiltered().changelog
        # used for hex generation
        self._revlog = repo.file(path)

    def hex(self, rev):
        # map the filelog revision to its introducing changelog node
        return hex(self._changelog.node(self._revlog.linkrev(rev)))
184 184
185 185 # TODO: maybe this can be a wrapper class for changectx/filectx list, which
186 186 # yields {'ctx': ctx}
def _ctxsgen(context, ctxs):
    """Yield a template mapping for each changectx/filectx in *ctxs*."""
    for c in ctxs:
        mapping = {
            'node': c.hex(),
            'rev': c.rev(),
            'user': c.user(),
            'date': c.date(),
            'description': c.description(),
            'branch': c.branch(),
        }
        # file contexts additionally expose the path they refer to
        if util.safehasattr(c, 'path'):
            mapping['file'] = c.path()
        yield mapping
200 200
def _siblings(siblings=None, hiderev=None):
    """Wrap sibling changesets as a mappinggenerator.

    A lone sibling whose revision equals *hiderev* is suppressed (used to
    hide the parent/child adjacent in a log listing).
    """
    filtered = [s for s in (siblings or []) if s.node() != nullid]
    if len(filtered) == 1 and filtered[0].rev() == hiderev:
        filtered = []
    return templateutil.mappinggenerator(_ctxsgen, args=(filtered,))
208 208
def difffeatureopts(req, ui, section):
    """Build diff feature options for *section*, honoring query-string
    whitespace overrides."""
    diffopts = diffutil.difffeatureopts(ui, untrusted=True,
                                        section=section, whitespace=True)

    for opt in ('ignorews', 'ignorewsamount', 'ignorewseol',
                'ignoreblanklines'):
        raw = req.qsparams.get(opt)
        if raw is None:
            continue
        parsed = stringutil.parsebool(raw)
        # an unparsable value (e.g. ?ignorews=foo) counts as True
        setattr(diffopts, opt, True if parsed is None else parsed)

    return diffopts
220 220
def annotate(req, fctx, ui):
    """Annotate *fctx* (following renames) with the web 'annotate' options."""
    opts = difffeatureopts(req, ui, 'annotate')
    return fctx.annotate(follow=True, diffopts=opts)
224 224
def parents(ctx, hide=None):
    """Return the parents of *ctx* as a sibling mapping generator.

    For a file context whose changeset is not the one that introduced
    the file, the introducing changeset is reported instead.
    """
    if isinstance(ctx, context.basefilectx):
        intro = ctx.introrev()
        if intro != ctx.changectx().rev():
            return _siblings([ctx.repo()[intro]], hide)
    return _siblings(ctx.parents(), hide)
231 231
def children(ctx, hide=None):
    """Return the children of *ctx* as a sibling mapping generator."""
    kids = ctx.children()
    return _siblings(kids, hide)
234 234
def renamelink(fctx):
    """Return a one-entry mappinglist describing the rename source of
    *fctx*, or an empty mappinglist when the file was not renamed."""
    renamed = fctx.renamed()
    if not renamed:
        return templateutil.mappinglist([])
    return templateutil.mappinglist(
        [{'file': renamed[0], 'node': hex(renamed[1])}])
240 240
def nodetagsdict(repo, node):
    """Return the tags of *node* as a hybridlist named 'name'."""
    tags = repo.nodetags(node)
    return templateutil.hybridlist(tags, name='name')
243 243
def nodebookmarksdict(repo, node):
    """Return the bookmarks of *node* as a hybridlist named 'name'."""
    marks = repo.nodebookmarks(node)
    return templateutil.hybridlist(marks, name='name')
246 246
def nodebranchdict(repo, ctx):
    """Return [branch] when *ctx* is the tip of its branch, else []."""
    branch = ctx.branch()
    # If this is an empty repo, ctx.node() == nullid,
    # ctx.branch() == 'default'.
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    names = [branch] if tipnode == ctx.node() else []
    return templateutil.hybridlist(names, name='name')
259 259
def nodeinbranch(repo, ctx):
    """Return [branch] for a non-default branch whose tip is not *ctx*."""
    branch = ctx.branch()
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    show = branch != 'default' and tipnode != ctx.node()
    return templateutil.hybridlist([branch] if show else [], name='name')
270 270
def nodebranchnodefault(ctx):
    """Return [branch] unless *ctx* is on the default branch."""
    branch = ctx.branch()
    names = [] if branch == 'default' else [branch]
    return templateutil.hybridlist(names, name='name')
277 277
278 278 def _nodenamesgen(context, f, node, name):
279 279 for t in f(node):
280 280 yield {name: t}
281 281
def showtag(repo, t1, node=nullid):
    """Render template *t1* once for each tag on *node*."""
    return templateutil.mappinggenerator(
        _nodenamesgen, args=(repo.nodetags, node, 'tag'), name=t1)
285 285
def showbookmark(repo, t1, node=nullid):
    """Render template *t1* once for each bookmark on *node*."""
    return templateutil.mappinggenerator(
        _nodenamesgen, args=(repo.nodebookmarks, node, 'bookmark'), name=t1)
289 289
def branchentries(repo, stripecount, limit=0):
    """Return a mappinggenerator of branch entries, most recent first.

    :repo: repository to read the branchmap from
    :stripecount: cycle length for the row-parity generator
    :limit: maximum number of entries to emit (0 means unlimited)
    """
    tips = []
    heads = repo.heads()
    parity = paritygen(stripecount)
    # open branches sort before closed ones, then by descending revision
    sortkey = lambda item: (not item[1], item[0].rev())

    def entries(context):
        count = 0
        # populate lazily so the branchmap is only read when rendered;
        # the outer `tips` list caches it across re-iterations
        if not tips:
            for tag, hs, tip, closed in repo.branchmap().iterbranches():
                tips.append((repo[tip], closed))
        for ctx, closed in sorted(tips, key=sortkey, reverse=True):
            if limit > 0 and count >= limit:
                return
            count += 1
            if closed:
                status = 'closed'
            elif ctx.node() not in heads:
                status = 'inactive'
            else:
                status = 'open'
            yield {
                'parity': next(parity),
                'branch': ctx.branch(),
                'status': status,
                'node': ctx.hex(),
                'date': ctx.date()
            }

    return templateutil.mappinggenerator(entries)
320 320
def cleanpath(repo, path):
    """Canonicalize a user-supplied repo-relative *path* via the auditor."""
    stripped = path.lstrip('/')
    auditor = pathutil.pathauditor(repo.root, realfs=False)
    return pathutil.canonpath(repo.root, '', stripped, auditor=auditor)
325 325
def changectx(repo, req):
    """Resolve the changeset named by the 'node' query parameter.

    Defaults to 'tip'; for a 'base:node' pair the part after the colon
    is used.
    """
    changeid = "tip"
    if 'node' in req.qsparams:
        changeid = req.qsparams['node']
        sep = changeid.find(':')
        if sep != -1:
            changeid = changeid[sep + 1:]

    return scmutil.revsymbol(repo, changeid)
335 335
def basechangectx(repo, req):
    """Resolve the base changeset of a 'base:node' query parameter.

    Returns None when no explicit base was requested.
    """
    if 'node' not in req.qsparams:
        return None
    changeid = req.qsparams['node']
    sep = changeid.find(':')
    if sep == -1:
        return None
    return scmutil.revsymbol(repo, changeid[:sep])
345 345
def filectx(repo, req):
    """Resolve the file context named by the 'file' and 'node'/'filenode'
    query parameters.

    Raises ErrorResponse(HTTP_NOT_FOUND) when a required parameter is
    missing.
    """
    if 'file' not in req.qsparams:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = cleanpath(repo, req.qsparams['file'])
    if 'node' in req.qsparams:
        changeid = req.qsparams['node']
    elif 'filenode' in req.qsparams:
        changeid = req.qsparams['filenode']
    else:
        raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
    # prefer resolving as a changeset; fall back to a filelog node id
    try:
        return scmutil.revsymbol(repo, changeid)[path]
    except error.RepoError:
        return repo.filectx(path, fileid=changeid)
362 362
def linerange(req):
    """Parse the 'linerange' query parameter ('from:to') and return the
    range normalized by util.processlinerange, or None when absent.

    Raises ErrorResponse(HTTP_BAD_REQUEST) on duplicate or malformed
    parameters.
    """
    values = req.qsparams.getall('linerange')
    if not values:
        return None
    if len(values) > 1:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'redundant linerange parameter')
    try:
        fromline, toline = map(int, values[0].split(':', 1))
    except ValueError:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'invalid linerange parameter')
    try:
        return util.processlinerange(fromline, toline)
    except error.ParseError as exc:
        raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc))
379 379
def formatlinerange(fromline, toline):
    """Render a line range for display, converting the start to 1-based."""
    start = fromline + 1
    return '%d:%d' % (start, toline)
382 382
def _succsandmarkersgen(context, mapping):
    """Yield successor/marker items, with each item's successors wrapped
    as sibling changeset mappings."""
    repo = context.resource(mapping, 'repo')
    itemmappings = templatekw.showsuccsandmarkers(context, mapping)
    for item in itemmappings.tovalue(context, mapping):
        item['successors'] = _siblings(repo[successor]
                                       for successor in item['successors'])
        yield item
390 390
def succsandmarkers(context, mapping):
    """Template keyword: successor changesets with their obsolescence
    markers."""
    return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,))

# teach templater succsandmarkers is switched to (context, mapping) API
succsandmarkers._requires = {'repo', 'ctx'}
396 396
def _whyunstablegen(context, mapping):
    """Yield instability entries for the mapped changeset, wrapping any
    divergent nodes as sibling mappings."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')

    entries = obsutil.whyunstable(repo, ctx)
    for entry in entries:
        if entry.get('divergentnodes'):
            entry['divergentnodes'] = _siblings(entry['divergentnodes'])
        yield entry
406 406
def whyunstable(context, mapping):
    """Template keyword: reasons the mapped changeset is unstable."""
    return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,))

# new-style template keyword: receives (context, mapping)
whyunstable._requires = {'repo', 'ctx'}
411 411
412 412 # helper to mark a function as a new-style template keyword; can be removed
413 413 # once old-style function gets unsupported and new-style becomes the default
414 414 def _kwfunc(f):
415 415 f._requires = ()
416 416 return f
417 417
def commonentry(repo, ctx):
    """Return the template mapping shared by all changeset-like views."""
    node = scmutil.binnode(ctx)
    return {
        # TODO: perhaps ctx.changectx() should be assigned if ctx is a
        # filectx, but I'm not pretty sure if that would always work because
        # fctx.parents() != fctx.changectx.parents() for example.
        'ctx': ctx,
        'rev': ctx.rev(),
        'node': hex(node),
        'author': ctx.user(),
        'desc': ctx.description(),
        'date': ctx.date(),
        'extra': ctx.extra(),
        'phase': ctx.phasestr(),
        'obsolete': ctx.obsolete(),
        'succsandmarkers': succsandmarkers,
        'instabilities': templateutil.hybridlist(ctx.instabilities(),
                                                 name='instability'),
        'whyunstable': whyunstable,
        'branch': nodebranchnodefault(ctx),
        'inbranch': nodeinbranch(repo, ctx),
        'branches': nodebranchdict(repo, ctx),
        'tags': nodetagsdict(repo, node),
        'bookmarks': nodebookmarksdict(repo, node),
        # lazy keywords so parents/children are only computed when rendered
        'parent': _kwfunc(lambda context, mapping: parents(ctx)),
        'child': _kwfunc(lambda context, mapping: children(ctx)),
    }
445 445
def changelistentry(web, ctx):
    '''Obtain a dictionary to be used for entries in a changelist.

    This function is called when producing items for the "entries" list passed
    to the "shortlog" and "changelog" templates.
    '''
    repo = web.repo
    rev = ctx.rev()
    n = scmutil.binnode(ctx)
    showtags = showtag(repo, 'changelogtag', n)
    files = listfilediffs(ctx.files(), n, web.maxfiles)

    entry = commonentry(repo, ctx)
    entry.update({
        'allparents': _kwfunc(lambda context, mapping: parents(ctx)),
        # rev - 1 / rev + 1 are suppressed when they are the lone
        # parent/child (see the hiderev handling in _siblings)
        'parent': _kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
        'child': _kwfunc(lambda context, mapping: children(ctx, rev + 1)),
        'changelogtag': showtags,
        'files': files,
    })
    return entry
467 467
def changelistentries(web, revs, maxcount, parityfn):
    """Emit up to *maxcount* changelist records for the given revisions."""
    repo = web.repo
    emitted = 0

    for rev in revs:
        if emitted >= maxcount:
            break
        emitted += 1

        record = changelistentry(web, repo[rev])
        record['parity'] = next(parityfn)
        yield record
483 483
def symrevorshortnode(req, ctx):
    """Return the escaped user-requested symbolic revision, falling back
    to the short node of *ctx*."""
    if 'node' not in req.qsparams:
        return short(scmutil.binnode(ctx))
    return templatefilters.revescape(req.qsparams['node'])
489 489
def _listfilesgen(context, ctx, stripecount):
    """Render one filenodelink/filenolink template per file in *ctx*."""
    parity = paritygen(stripecount)
    for blockno, f in enumerate(ctx.files()):
        # files absent from the changeset's manifest get no link target
        if f in ctx:
            template = 'filenodelink'
        else:
            template = 'filenolink'
        yield context.process(template, {
            'node': ctx.hex(),
            'file': f,
            'blockno': blockno + 1,
            'parity': next(parity),
        })
500 500
def changesetentry(web, ctx):
    '''Obtain a dictionary to be used to render the "changeset" template.'''

    showtags = showtag(web.repo, 'changesettag', scmutil.binnode(ctx))
    showbookmarks = showbookmark(web.repo, 'changesetbookmark',
                                 scmutil.binnode(ctx))
    showbranch = nodebranchnodefault(ctx)

    # an explicit diff base may be requested as 'base:node'; default to p1
    basectx = basechangectx(web.repo, web.req)
    if basectx is None:
        basectx = ctx.p1()

    # the 'style' query parameter overrides the configured style
    style = web.config('web', 'style')
    if 'style' in web.req.qsparams:
        style = web.req.qsparams['style']

    diff = diffs(web, ctx, basectx, None, style)

    parity = paritygen(web.stripecount)
    # diffstatgen re-yields the same data, so diffstat and diffsummary
    # below share one computation
    diffstatsgen = diffstatgen(web.repo.ui, ctx, basectx)
    diffstats = diffstat(ctx, diffstatsgen, parity)

    return dict(
        diff=diff,
        symrev=symrevorshortnode(web.req, ctx),
        basenode=basectx.hex(),
        changesettag=showtags,
        changesetbookmark=showbookmarks,
        changesetbranch=showbranch,
        files=templateutil.mappedgenerator(_listfilesgen,
                                           args=(ctx, web.stripecount)),
        diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)),
        diffstat=diffstats,
        archives=web.archivelist(ctx.hex()),
        **pycompat.strkwargs(commonentry(web.repo, ctx)))
536 536
def _listfilediffsgen(context, files, node, max):
    """Render a filedifflink per file, truncating at *max* with an
    ellipsis template."""
    for f in files[:max]:
        yield context.process('filedifflink', {'node': hex(node), 'file': f})
    if len(files) > max:
        # signal that the list was truncated
        yield context.process('fileellipses', {})
542 542
def listfilediffs(files, node, max):
    """Wrap _listfilediffsgen as a template mappedgenerator."""
    return templateutil.mappedgenerator(
        _listfilediffsgen, args=(files, node, max))
546 546
def _prettyprintdifflines(context, lines, blockno, lineidprefix):
    """Render each diff line through the template matching its marker."""
    # first-character marker -> template name; anything else is plain
    templates = {
        '+': "difflineplus",
        '-': "difflineminus",
        '@': "difflineat",
    }
    for lineno, l in enumerate(lines, 1):
        difflineno = "%d.%d" % (blockno, lineno)
        ltype = templates.get(l[0:1], "diffline")
        yield context.process(ltype, {
            'line': l,
            'lineno': lineno,
            'lineid': lineidprefix + "l%s" % difflineno,
            'linenumber': "% 8s" % difflineno,
        })
564 564
def _diffsgen(context, repo, ctx, basectx, files, style, stripecount,
              linerange, lineidprefix):
    """Yield one template mapping per changed file in ctx vs. basectx.

    :files: restrict the diff to these paths (None/empty means all)
    :style: 'raw' keeps the full diff header; other styles drop its
            first line
    :linerange: when set, only hunks overlapping this range are kept
    """
    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    parity = paritygen(stripecount)

    # diffhunks() takes context objects since the API change in this file
    diffhunks = patch.diffhunks(repo, basectx, ctx, m, opts=diffopts)
    for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
        if style != 'raw':
            header = header[1:]
        lines = [h + '\n' for h in header]
        for hunkrange, hunklines in hunks:
            if linerange is not None and hunkrange is not None:
                s1, l1, s2, l2 = hunkrange
                # filter on the "new side" (s2, l2) of the hunk
                if not mdiff.hunkinrange((s2, l2), linerange):
                    continue
            lines.extend(hunklines)
        if lines:
            l = templateutil.mappedgenerator(_prettyprintdifflines,
                                             args=(lines, blockno,
                                                   lineidprefix))
            yield {
                'parity': next(parity),
                'blockno': blockno,
                'lines': l,
            }
597 595
def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=''):
    """Return a 'diffblock' mappinggenerator for *ctx* against *basectx*."""
    return templateutil.mappinggenerator(
        _diffsgen,
        args=(web.repo, ctx, basectx, files, style, web.stripecount,
              linerange, lineidprefix),
        name='diffblock')
602 600
603 601 def _compline(type, leftlineno, leftline, rightlineno, rightline):
604 602 lineid = leftlineno and ("l%d" % leftlineno) or ''
605 603 lineid += rightlineno and ("r%d" % rightlineno) or ''
606 604 llno = '%d' % leftlineno if leftlineno else ''
607 605 rlno = '%d' % rightlineno if rightlineno else ''
608 606 return {
609 607 'type': type,
610 608 'lineid': lineid,
611 609 'leftlineno': leftlineno,
612 610 'leftlinenumber': "% 6s" % llno,
613 611 'leftline': leftline or '',
614 612 'rightlineno': rightlineno,
615 613 'rightlinenumber': "% 6s" % rlno,
616 614 'rightline': rightline or '',
617 615 }
618 616
def _getcompblockgen(context, leftlines, rightlines, opcodes):
    """Expand SequenceMatcher opcodes into comparison-row mappings.

    For each opcode, the overlapping length is emitted as paired rows;
    the surplus of the longer side is emitted with the other side empty.
    """
    for type, llo, lhi, rlo, rhi in opcodes:
        # opcode tags are native strings; templates want bytes
        type = pycompat.sysbytes(type)
        len1 = lhi - llo
        len2 = rhi - rlo
        count = min(len1, len2)
        # paired rows while both sides still have lines
        for i in pycompat.xrange(count):
            yield _compline(type=type,
                            leftlineno=llo + i + 1,
                            leftline=leftlines[llo + i],
                            rightlineno=rlo + i + 1,
                            rightline=rightlines[rlo + i])
        if len1 > len2:
            # left-only remainder
            for i in pycompat.xrange(llo + count, lhi):
                yield _compline(type=type,
                                leftlineno=i + 1,
                                leftline=leftlines[i],
                                rightlineno=None,
                                rightline=None)
        elif len2 > len1:
            # right-only remainder
            for i in pycompat.xrange(rlo + count, rhi):
                yield _compline(type=type,
                                leftlineno=None,
                                leftline=None,
                                rightlineno=i + 1,
                                rightline=rightlines[i])
645 643
def _getcompblock(leftlines, rightlines, opcodes):
    """Wrap the opcode expansion as a 'comparisonline' mappinggenerator."""
    return templateutil.mappinggenerator(
        _getcompblockgen, args=(leftlines, rightlines, opcodes),
        name='comparisonline')
650 648
def _comparegen(context, contextnum, leftlines, rightlines):
    '''Generator function that provides side-by-side comparison data.'''
    matcher = difflib.SequenceMatcher(None, leftlines, rightlines)
    if contextnum < 0:
        # negative context means a full comparison in a single block
        yield {'lines': _getcompblock(leftlines, rightlines,
                                      matcher.get_opcodes())}
    else:
        for opcodes in matcher.get_grouped_opcodes(n=contextnum):
            yield {'lines': _getcompblock(leftlines, rightlines, opcodes)}
661 659
def compare(contextnum, leftlines, rightlines):
    """Return a 'comparisonblock' mappinggenerator for the two line lists."""
    return templateutil.mappinggenerator(
        _comparegen, args=(contextnum, leftlines, rightlines),
        name='comparisonblock')
666 664
def diffstatgen(ui, ctx, basectx):
    '''Generator function that provides the diffstat data.'''

    diffopts = patch.diffopts(ui, {'noprefix': False})
    stats = patch.diffstatdata(
        util.iterlines(ctx.diff(basectx, opts=diffopts)))
    maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
    # the stats are computed once, then re-yielded on every next() so
    # multiple consumers (diffstat, diffsummary) can share one generator
    while True:
        yield stats, maxname, maxtotal, addtotal, removetotal, binary
676 674
def diffsummary(statgen):
    '''Return a short summary of the diff.'''
    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    return (_(' %d files changed, %d insertions(+), %d deletions(-)\n')
            % (len(stats), addtotal, removetotal))
683 681
def _diffstattmplgen(context, ctx, statgen, parity):
    """Render one diffstat row template per file in the stats."""
    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    files = ctx.files()

    def pct(i):
        # percentage of the largest per-file total, guarding divide-by-zero
        return 0 if maxtotal == 0 else (float(i) / maxtotal) * 100

    for fileno, (filename, adds, removes, isbinary) in enumerate(stats, 1):
        # only files present in the changeset get a link
        if filename in files:
            template = 'diffstatlink'
        else:
            template = 'diffstatnolink'
        yield context.process(template, {
            'node': ctx.hex(),
            'file': filename,
            'fileno': fileno,
            'total': adds + removes,
            'addpct': pct(adds),
            'removepct': pct(removes),
            'parity': next(parity),
        })
707 705
def diffstat(ctx, statgen, parity):
    '''Return a diffstat template for each file in the diff.'''
    return templateutil.mappedgenerator(_diffstattmplgen,
                                        args=(ctx, statgen, parity))
712 710
class sessionvars(templateutil.wrapped):
    """Template wrapper over the session (query-string) variables.

    Iterating via itermaps() yields one mapping per variable, with the
    URL separator ('?' for the first, '&' afterwards) precomputed.
    """
    def __init__(self, vars, start='?'):
        self._start = start
        self._vars = vars

    def __getitem__(self, key):
        return self._vars[key]

    def __setitem__(self, key, value):
        self._vars[key] = value

    def __copy__(self):
        return sessionvars(copy.copy(self._vars), self._start)

    def contains(self, context, mapping, item):
        item = templateutil.unwrapvalue(context, mapping, item)
        return item in self._vars

    def getmember(self, context, mapping, key):
        key = templateutil.unwrapvalue(context, mapping, key)
        return self._vars.get(key)

    def getmin(self, context, mapping):
        raise error.ParseError(_('not comparable'))

    def getmax(self, context, mapping):
        raise error.ParseError(_('not comparable'))

    def filter(self, context, mapping, select):
        # implement if necessary
        raise error.ParseError(_('not filterable'))

    def itermaps(self, context):
        separator = self._start
        for key, value in sorted(self._vars.iteritems()):
            yield {'name': key,
                   'value': pycompat.bytestr(value),
                   'separator': separator,
                   }
            separator = '&'

    def join(self, context, mapping, sep):
        # could be '{separator}{name}={value|urlescape}'
        raise error.ParseError(_('not displayable without template'))

    def show(self, context, mapping):
        # BUG FIX: join() takes (context, mapping, sep); the previous call
        # self.join(context, '') passed '' as the mapping and omitted sep,
        # raising TypeError instead of the intended ParseError.
        return self.join(context, mapping, '')

    def tobool(self, context, mapping):
        return bool(self._vars)

    def tovalue(self, context, mapping):
        return self._vars
766 764
class wsgiui(uimod.ui):
    # default termwidth breaks under mod_wsgi
    def termwidth(self):
        """Return a fixed 80-column width instead of probing the terminal."""
        return 80
771 769
def getwebsubs(repo):
    """Parse [websub] (and legacy [interhg]) config entries into a list
    of (compiled regexp, replacement format) pairs.

    Each entry is a sed-style 's<delim>pattern<delim>replacement<delim>flags'
    expression; invalid entries are warned about and skipped.
    """
    websubtable = []
    websubdefs = repo.ui.configitems('websub')
    # we must maintain interhg backwards compatibility
    websubdefs += repo.ui.configitems('interhg')
    for key, pattern in websubdefs:
        # grab the delimiter from the character after the "s"
        unesc = pattern[1:2]
        delim = stringutil.reescape(unesc)

        # identify portions of the pattern, taking care to avoid escaped
        # delimiters. the replace format and flags are optional, but
        # delimiters are required.
        match = re.match(
            br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
            % (delim, delim, delim), pattern)
        if not match:
            repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
                         % (key, pattern))
            continue

        # we need to unescape the delimiter for regexp and format
        delim_re = re.compile(br'(?<!\\)\\%s' % delim)
        regexp = delim_re.sub(unesc, match.group(1))
        format = delim_re.sub(unesc, match.group(2))

        # the pattern allows for 6 regexp flags, so set them if necessary
        flagin = match.group(3)
        flags = 0
        if flagin:
            for flag in flagin.upper():
                flags |= re.__dict__[flag]

        try:
            regexp = re.compile(regexp, flags)
            websubtable.append((regexp, format))
        except re.error:
            repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
                         % (key, regexp))
    return websubtable
812 810
def getgraphnode(repo, ctx):
    """Return the graph glyph for *ctx*: current-marker plus node symbol."""
    current = templatekw.getgraphnodecurrent(repo, ctx)
    symbol = templatekw.getgraphnodesymbol(ctx)
    return current + symbol
@@ -1,2862 +1,2862 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import contextlib
13 13 import copy
14 14 import email
15 15 import errno
16 16 import hashlib
17 17 import os
18 18 import posixpath
19 19 import re
20 20 import shutil
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 copies,
30 30 diffhelper,
31 31 diffutil,
32 32 encoding,
33 33 error,
34 34 mail,
35 35 match as matchmod,
36 36 mdiff,
37 37 pathutil,
38 38 pycompat,
39 39 scmutil,
40 40 similar,
41 41 util,
42 42 vfs as vfsmod,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 procutil,
47 47 stringutil,
48 48 )
49 49
50 50 stringio = util.stringio
51 51
# Matches a git-style diff header and captures the old/new paths.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into runs of tabs vs. runs of non-tab characters.
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# Splits a line into tab runs, space runs, word bytes (including bytes
# >= 0x80), or single other bytes -- used for word-level diff display.
wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
                          b'[^ \ta-zA-Z0-9_\x80-\xff])')

# Re-exported alias; raised for any patch parse/apply failure.
PatchError = error.PatchError
58 58
59 59 # public functions
60 60
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # heuristic: does this line look like a "Key: value" mail header
        # (or a continuation of one)?
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # materialize the accumulated lines as a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers (hg export format)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on 'From ' separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # defer to the email parser, yielding each patch-bearing MIME part
        def msgfp(m):
            # flatten a message (sub)part back into a file-like object
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split on runs of header-looking lines (concatenated plain patches)
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no split marker was ever seen: the whole input is one patch
        yield chunk(cur)

    class fiter(object):
        # wrap a readline-only object (e.g. an http response) so it can
        # be iterated like a file
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the leading lines to pick the appropriate splitting strategy
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
189 189
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# Each '# <header> <value>' line found in an exported patch header is
# copied into the extracted-data dictionary under the matching key
# (see the hgpatchheader handling in _extract()).
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
196 196
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. The temporary file named by
    'filename' is removed automatically when the context manager exits.'''

    # Spool the patch body to a named temporary file; _extract() fills it
    # in and records its path under data['filename'] when a diff is found.
    fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        yield _extract(ui, fileobj, tmpname, tmpfp)
    finally:
        # always close and delete the temp file, even on error
        tmpfp.close()
        os.unlink(tmpname)
222 222
def _extract(ui, fileobj, tmpname, tmpfp):
    # Worker for extract(): parse fileobj (a bare patch or an email
    # carrying one), spool the diff body to tmpfp/tmpname and return the
    # metadata dictionary described in extract()'s docstring.

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data['user']:
        # Not an email, restore parsed headers if any
        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                            for h in msg.items()) + '\n'

    # should try to parse msg['Date']
    parents = []

    if subject:
        # strip any leading '[PATCH n/m]'-style tag from the subject
        if subject.startswith('[PATCH'):
            pend = subject.find(']')
            if pend >= 0:
                subject = subject[pend + 1:].lstrip()
        subject = re.sub(br'\n[ \t]+', ' ', subject)
        ui.debug('Subject: %s\n' % subject)
    if data['user']:
        ui.debug('From: %s\n' % data['user'])
    diffs_seen = 0
    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
    message = ''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug('Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            # everything before the diff start is candidate commit
            # message; the diff itself goes to the temporary file
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug('found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[:m.start(0)].splitlines():
                if line.startswith('# HG changeset patch') and not hgpatch:
                    ui.debug('patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    # '# Key value' lines of the export header carry
                    # metadata: user, parents and patchheadermap entries
                    if line.startswith('# User '):
                        data['user'] = line[7:]
                        ui.debug('From: %s\n' % data['user'])
                    elif line.startswith("# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith("# "):
                        for header, key in patchheadermap:
                            prefix = '# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix):]
                    else:
                        hgpatchheader = False
                elif line == '---':
                    # conventional patch separator: ignore what follows
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write('\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith('\n'):
                    tmpfp.write('\n')
        elif not diffs_seen and message and content_type == 'text/plain':
            # plain-text part seen before any diff: append to the message
            message += '\n' + payload

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname

    return data
322 322
class patchmeta(object):
    """Metadata describing a single file touched by a git-style patch.

    Attributes:
      op      -- one of ADD, DELETE, RENAME, COPY or MODIFY
      path    -- path of the patched file
      oldpath -- source path for COPY/RENAME operations, else None
      mode    -- (islink, isexec) pair when the patch changes the file
                 mode, else None
      binary  -- True when the patch carries a 'GIT binary patch' payload
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Decode a numeric st_mode into the (islink, isexec) pair; the
        # raw masked values are kept -- only their truthiness matters.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        """Return an independent duplicate carrying all metadata fields."""
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # a '/dev/null' source is only consistent with an ADD
        if afile == '/dev/null':
            return self.op == 'ADD'
        return 'a/' + (self.oldpath or self.path) == afile

    def _ispatchingb(self, bfile):
        # a '/dev/null' destination is only consistent with a DELETE
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return 'b/' + self.path == bfile

    def ispatching(self, afile, bfile):
        """True when a hunk's (afile, bfile) pair refers to this file."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
368 368
def readgitpatch(lr):
    """Extract git extended-header metadata from line source *lr*.

    Returns a list of patchmeta objects, one per 'diff --git' section
    encountered before its hunks start.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new file section begins; flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # hunks begin: metadata for this file is complete
                gitpatches.append(gp)
                gp = None
                continue
            # accumulate extended header lines into the current patchmeta
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # the trailing 6 octal digits are the file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
412 412
class linereader(object):
    """File-like reader supporting push-back of lines onto the stream."""

    def __init__(self, fp):
        self.fp = fp
        # pushed-back lines, served FIFO before reading from fp again
        self.buf = []

    def push(self, line):
        """Queue *line* to be returned by an upcoming readline() call."""
        if line is not None:
            self.buf.append(line)

    def readline(self):
        """Return the oldest pushed-back line, or the next line from fp."""
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # iterate until readline() returns the empty-string EOF sentinel
        return iter(self.readline, '')
432 432
class abstractbackend(object):
    """Interface implemented by targets that patches are applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for the target file.

        data is None when the file is missing or deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write *data* to *fname* and apply the (islink, isexec) *mode*.

        A None *data* means the content must be left unchanged.  When the
        file was copied then modified, *copysource* names the original.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Remove the target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Record rejected hunk *lines* for *fname*.

        *failed* of *total* hunks did not apply.  The base implementation
        intentionally discards them.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
466 466
class fsbackend(abstractbackend):
    """Patch backend writing directly to a filesystem tree under basedir."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        # all file accesses below go through a vfs rooted at basedir
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        # symlinks are represented by their target string with flags
        # (islink=True, isexec=False)
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file is simply non-executable; re-raise anything else
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing/deleted file: (None, None) per the backend contract
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # flags-only change: leave content untouched
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            # for symlinks, data is the link target
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        # save rejected hunks next to the target file as '<fname>.rej'
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
516 516
class workingbackend(fsbackend):
    """fsbackend that also keeps the repository dirstate up to date.

    Changed/removed/copied files are tracked while hunks are applied and
    the dirstate updates (copies, forgets, marktouched) are flushed in
    close().
    """
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files that exist on disk but are untracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # flush accumulated bookkeeping into the dirstate and report the
        # full set of files the patch touched
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
560 560
class filestore(object):
    """Store file contents in memory, spilling to a temp dir past maxsize.

    Entries are kept in a dict until *maxsize* cumulative in-memory bytes
    are reached; further entries are written to an on-disk temporary
    area.  A negative maxsize keeps everything in memory.
    """

    def __init__(self, maxsize=None):
        self.opener = None   # lazily-created vfs over the spill directory
        self.files = {}      # fname -> (diskname, mode, copied)
        self.created = 0     # counter used to mint on-disk names
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0        # bytes currently held in memory
        self.data = {}       # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        """Record *data*/*mode* for *fname*, in memory or on disk."""
        fits = self.maxsize < 0 or self.size + len(data) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = pycompat.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # plain integer names on disk sidestep any filename issues
        diskname = '%d' % self.created
        self.opener.write(diskname, data)
        self.created += 1
        self.files[fname] = (diskname, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for *fname*, or (None, None, None)."""
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            diskname, mode, copied = self.files[fname]
            return self.opener.read(diskname), mode, copied
        return None, None, None

    def close(self):
        # drop the on-disk spill area, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
597 597
class repobackend(abstractbackend):
    """Backend applying a patch against a changectx, into a filestore.

    The working directory is never touched: modified contents go into
    *store* and close() returns the set of changed/removed files.
    """
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # patching is only allowed against files present in the context
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # missing file: (None, None) per the backend contract
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: carry over the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed
639 639
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range lines: '--- start[,len] ---' / '*** start[,len] ***'
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# accepted end-of-line handling modes for patching (see patchfile)
eolmodes = ['strict', 'crlf', 'lf', 'auto']
644 644
class patchfile(object):
    """State and logic for applying one patch's hunks to a single file.

    Reads the current content from *backend* (or from *store* for
    copies), applies hunks with offset/fuzz handling, collects rejected
    hunks, and writes the result back via close().
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # copies read their base content from the store, everything else
        # from the backend
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        # line content -> line numbers, built lazily for fuzzy matching
        self.hash = {}
        self.dirty = 0
        # cumulative line-count drift caused by previously applied hunks
        self.offset = 0
        # distance the previous hunk landed from its expected position
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* through the backend, applying the eol policy."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l.endswith('\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # announce the file being patched, at most once
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk *h*; return the fuzz used, or -1 on rejection."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content, no fuzzing possible
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # flush modified content and any rejects; returns the reject count
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
860 860
class header(object):
    """Parsed header portion of one file's diff, plus its hunks."""

    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when an 'index ' line marks this as a binary change."""
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a human-oriented summary of this header to *fp*."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                changed = sum([max(h.added, h.removed) for h in self.hunks])
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when the change can only be taken or left as a whole."""
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Return the path(s) named by the first header line."""
        gitmatch = self.diffgit_re.match(self.header[0])
        if not gitmatch:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = gitmatch.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level: e.g. a deleted file must be taken or left entirely.
        # A newly added file is special only while it is empty -- once it
        # has content the user may edit it hunk by hunk.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        if emptynewfile:
            return True
        return any(self.special_re.match(line) for line in self.header)
932 932
class recordhunk(object):
    """A single hunk of a patch, as manipulated by interactive record.

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Cap context at maxcontext lines, dropping from the far
            # side; returns (number of dropped lines, kept lines).
            if maxcontext is None:
                return 0, lines
            excess = len(lines) - maxcontext
            if excess <= 0:
                return 0, lines
            if reverse:
                return excess, lines[excess:]
            return excess, lines[:maxcontext]

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # leading context dropped above shifts the hunk start downwards
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        return (isinstance(v, recordhunk)
                and v.hunk == self.hunk
                and v.proc == self.proc
                and v.fromline == self.fromline
                and v.header.files() == self.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line.startswith('+'))
        removed = sum(1 for line in hunk if line.startswith('-'))
        return added, removed

    def reversehunk(self):
        """Return the inverse hunk: diff(B, A) for this diff(A, B).

        Only the +/- signs and the from/to lines are swapped; context
        and everything else is carried over unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = ['%s%s' % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        # the no-newline marker is bookkeeping, not a context line
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1011 1011
def getmessages():
    """Return the catalog of translated prompts used by filterpatch().

    The first-level key selects the situation ('multiple' when several
    hunks are being reviewed, 'single' for a lone change, 'help' for the
    promptchoice menus); the second-level key is the operation being
    performed ('apply', 'discard' or 'record').
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1057 1057
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Walks the given headers and their hunks, prompting the user for each
    one, and returns a pair (chunks, {}) where chunks holds only the
    selected headers/hunks.  *operation* ('record', 'apply' or 'discard')
    selects the wording of the prompts; it defaults to 'record'.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # a previous "all"/"done" or "file"/"skip" answer short-circuits
        # any further prompting
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff")
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        # fixoffset tracks how many lines earlier skipped hunks would have
        # shifted the target file, so later hunks can be re-anchored
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # binary/rename-only headers carry all their hunks implicitly
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    # copy before mutating: the caller's hunk stays intact
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: use the re-parsed hunks instead
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # keep only headers that are special (e.g. binary) or kept >= 1 hunk
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single hunk parsed from a unified or context diff.

    ``a`` holds the old-side lines (with '-'/' ' prefixes), ``b`` the
    new-side lines (without prefixes), and ``hunk`` the raw text
    including the descriptor line.  Context-diff hunks are converted to
    unified form while parsing.
    """
    def __init__(self, desc, num, lr, context):
        # desc: the hunk descriptor line ('@@ ...' or '*** ...')
        # num: 1-based hunk number, used in error messages
        # lr: linereader positioned just after desc; None builds an empty
        #     shell object (see getnormalized())
        # context: truthy for context diffs, falsy for unified diffs
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from linereader *lr*.

        Raises PatchError when the descriptor or body is malformed.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in '@@ -n +m @@' means a single line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
                                self.a, self.b)
        except error.ParseError as e:
            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk body and convert it to unified form.

        Fills self.a/self.b/self.hunk and rewrites self.desc as a
        unified '@@' descriptor.  Raises PatchError on malformed input.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-side block: '- ' removals, '! ' changes, '  ' context
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # '\ No newline at end of file': drop the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new-side block: '+ ' additions, '! ' changes, '  ' context
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # merge the new-side line into self.hunk at the right spot,
            # skipping over '-' lines that only exist on the old side
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True once both sides hold as many lines as the descriptor declared
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (oldlines, oldstart, newlines, newstart) after trimming
        up to *fuzz* context lines (top only when *toponly* is set)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1428 1428
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against existing content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        # file name, used only for error messages
        self._fname = fname
        self._read(lr)

    def complete(self):
        # a binary hunk is complete once its payload has been decoded
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list.

        For delta hunks, *lines* is joined and used as the source the
        delta is applied to.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a 'literal'/'delta' blob: base85-encoded, zlib-compressed.

        Raises PatchError when the data is missing, undecodable, or of
        the wrong length.
        """
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        # scan for the size header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # the first character encodes the decoded length of the line:
            # 'A'-'Z' map to 1-26, 'a'-'z' to 27-52 (git's base85 framing)
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1483 1483
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' patch header line.

    Everything after the first tab (or, failing that, the first space)
    is discarded, mirroring patch(1)'s handling of timestamps.
    """
    # --- filename \t|space stuff
    line = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = line.find(sep)
        if cut >= 0:
            return line[:cut]
    return line
1493 1493
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
    c
    1
    2
    @@ -2,6 +1,6 @@
    c
    1
    2
    - 3
    +4
    5
    d
    @@ -6,3 +5,2 @@
    5
    d
    -lastline

    '''

    # headers pass through untouched; hunks are replaced by their inverse
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1556 1556
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # a new '@@' descriptor: flush any pending hunk, then remember
            # where the next one starts
            self.addcontext([])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context lines terminate the pending hunk, if any; the new
            # recordhunk gets the trailing context and the maxcontext cap
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # '+'/'-' lines: the context seen so far becomes leading context
            if self.context:
                self.before = self.context
                self.context = []
            if self.hunk:
                self.addcontext([])
            self.hunk = hunk

        def newfile(self, hdr):
            # a 'diff ...' header starts a new file; flush pending state
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # state -> {event -> action}; a missing entry is a parse error
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    # drive the state machine with events produced by scanpatch()
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1683 1683
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    total = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < total - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1721 1721
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Return a patchmeta naming the file targeted by *hunk*.

    Chooses between the stripped '---' name (afile) and '+++' name
    (bfile) using patch(1)-style heuristics for creations, deletions,
    backups (foo vs foo.orig) and files missing from *backend*.  Sets
    gp.op to 'ADD'/'DELETE' accordingly.  Raises PatchError when neither
    side names a usable file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length, zero-start old side means the file is being created;
    # the symmetric condition on the new side means deletion
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the side that is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1776 1776
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    rangere = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    reader = linereader(fp)

    def takewhile(first, keep):
        """collect lines while keep() holds, pushing back the first miss"""
        taken = [first]
        for cur in iter(reader.readline, ''):
            if not keep(cur):
                reader.push(cur)
                break
            taken.append(cur)
        return taken

    for cur in iter(reader.readline, ''):
        if cur.startswith('diff --git a/') or cur.startswith('diff -r '):
            def notheader(l):
                parts = l.split(None, 1)
                return not parts or parts[0] not in ('---', 'diff')
            hdr = takewhile(cur, notheader)
            fromline = reader.readline()
            if fromline.startswith('---'):
                # the '---'/'+++' pair belongs to the file header
                hdr += [fromline, reader.readline()]
            else:
                reader.push(fromline)
            yield 'file', hdr
        elif cur.startswith(' '):
            yield 'context', takewhile(cur, lambda l: l.startswith((' ', '\\')))
        elif cur.startswith(('-', '+')):
            yield 'hunk', takewhile(cur,
                                    lambda l: l.startswith(('-', '+', '\\')))
        else:
            m = rangere.match(cur)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', cur
1824 1824
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    offset = 0
    try:
        offset = lr.fp.tell()
        stream = lr.fp
    except IOError:
        # unseekable input: buffer the whole thing so we can rewind below
        stream = stringio(lr.fp.read())
    gitreader = linereader(stream)
    gitreader.push(firstline)
    patches = readgitpatch(gitreader)
    stream.seek(offset)
    return patches
1850 1850
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is a tri-state: None = unknown yet, False = unified diff,
    # True = context diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # only the first hunk of a file triggers the 'file' event
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit metadata-only entries (renames/copies without hunks)
            # that precede the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush remaining metadata-only git entries
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1945 1945
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the decoded delta payload: a varint-encoded source
    length, a varint-encoded target length, then a stream of copy and
    insert opcodes.  data is the source blob the copy opcodes read
    from.  Returns the reconstructed target content as bytes.

    Raises PatchError on the reserved opcode 0.
    """
    def deltahead(binchunk):
        # Length of the varint at the start of binchunk: bytes with the
        # high bit set are continuation bytes; the first byte without it
        # terminates the number.
        for i in range(len(binchunk)):
            if not (ord(binchunk[i:i + 1]) & 0x80):
                return i + 1
        return len(binchunk)
    # bytes, not str: on Python 3 the slices appended below are bytes and
    # cannot be concatenated onto a unicode string
    out = b''
    # skip the source-size and target-size headers; only the opcode
    # stream that follows is needed for reconstruction
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy opcode: bits 0-3 select which little-endian offset
            # bytes follow, bits 4-6 which size bytes follow
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # an all-zero size encodes 64kB, as in git's patch-delta.c
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert opcode: cmd is the count of literal bytes that
            # follow in the delta itself
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            # opcode 0 is reserved
            raise PatchError(_('unexpected delta opcode 0'))
    return out
2001 2001
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to _applydiff with patchfile as the per-file patcher factory
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
2014 2014
2015 2015 def _canonprefix(repo, prefix):
2016 2016 if prefix:
2017 2017 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2018 2018 if prefix != '':
2019 2019 prefix += '/'
2020 2020 return prefix
2021 2021
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Parse the patch in fp and apply it file by file, hunk by hunk.

    patcher is a factory called for each patched file; backend performs
    the file-system operations and store caches copy/rename sources.
    Returns 0 on clean application, 1 if there was fuzz, and -1 if any
    hunks were rejected.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # apply -p style component stripping, then prepend the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # opening the target file failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # close out the previous file before starting the next one
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain (non-git) patch: synthesize the metadata
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (delete, rename/copy, mode change,
                # empty-file add): no hunks to apply
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the reject but keep patching the remaining files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any hunk
            # can modify them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2106 2106
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    files is a set the touched file names are added to; similarity is
    passed through to scmutil.marktouched().  Raises PatchError if the
    external command exits non-zero.
    """
    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    cmd = ('%s %s -p%d < %s'
           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
    ui.debug('Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, 'rb')
    # pf tracks the file named by the most recent "patching file" line.
    # Initialize both so malformed tool output (a fuzz/FAILED line before
    # any "patching file" line) cannot raise UnboundLocalError.
    pf = ''
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             procutil.explainexit(code))
    return fuzz
2150 2150
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj through the given backend.

    patchobj may be a path name or an open file-like object.  Touched
    files are added to the files set.  Returns True if the patch applied
    with fuzz; raises PatchError if it did not apply cleanly.
    """
    files = set() if files is None else files
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # patchobj is already a file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2177 2177
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2184 2184
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply patchobj on top of ctx, writing results through store."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2189 2189
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an explicitly configured external patch tool takes precedence over
    # the builtin implementation
    patcher = ui.config('ui', 'patch')
    if not patcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, patcher, patchname, strip,
                          files, similarity)
2211 2211
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of repository paths touched by the patch at patchpath."""
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                # hunk contents and git preamble entries name no new files
                continue
            if state != 'file':
                raise error.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                               prefix)[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                   strip, prefix)
            changed.add(gp.path)
            if gp.op == 'RENAME':
                changed.add(gp.oldpath)
    return changed
2234 2234
class GitDiffRequired(Exception):
    # Raised internally (see diffhunks) when plain-diff output would lose
    # data and the diff must be regenerated in git format.
    pass
2237 2237
# Backwards-compatible aliases: the diff-options constructors live in the
# diffutil module.
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2241 2241
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    # resolve the node arguments to contexts here; diffhunks() takes
    # context objects
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    for fctx1, fctx2, hdr, hunks in diffhunks(
        repo, ctx1=ctx1, ctx2=ctx2,
        match=match, changes=changes, opts=opts,
        losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten all hunk lines of this file into one text block
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header if there is content, or if the header itself
        # carries information (e.g. git metadata lines)
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
def diffhunks(repo, ctx1, ctx2, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    def lrugetfilectx():
        # small LRU cache (20 entries) of filelogs so repeated lookups of
        # the same file in both contexts avoid reopening the filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    if relroot:
        # restrict the diff to files under the relative root
        relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
        match = matchmod.intersectmatchers(match, relrootmatch)

    if not changes:
        changes = ctx1.status(ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot:
        # filter out copies where source side isn't inside the relative root
        # (copies.pathcopies() already filtered out the destination)
        copy = {dst: src for dst, src in copy.iteritems()
                if src.startswith(relroot)}

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset))
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2392 2392
def diffsinglehunk(hunklines):
    """yield tokens for a list of lines in a single hunk"""
    for line in hunklines:
        # strip the EOL first, then trailing whitespace, so each piece
        # can be labelled separately
        chompline = line.rstrip('\r\n')
        stripline = chompline.rstrip()
        firstchar = line[0:1]
        if firstchar == '-':
            label = 'diff.deleted'
        elif firstchar == '+':
            label = 'diff.inserted'
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
        # highlight tabs and trailing whitespace
        for token in tabsplitter.findall(stripline):
            if token[0:1] == '\t':
                yield (token, 'diff.tab')
            else:
                yield (token, label)
        if chompline != stripline:
            yield (chompline[len(stripline):], 'diff.trailingwhitespace')
        if chompline != line:
            yield (line[len(chompline):], '')
2416 2416
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors"""
    # prepare deleted, and inserted content
    a = ''
    b = ''
    for line in hunklines:
        if line[0:1] == '-':
            a += line[1:]
        elif line[0:1] == '+':
            b += line[1:]
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    aln = [s if s == '\n' else s + '\n' for s in al]
    bln = [s if s == '\n' else s + '\n' for s in bl]
    an = ''.join(aln)
    bn = ''.join(bln)
    # run the diff algorithm, prepare atokens and btokens
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        # '!' marks a changed block; matching blocks keep the base label
        changed = btype == '!'
        for token in mdiff.splitnewlines(''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [('-', 'diff.deleted', atokens),
                                  ('+', 'diff.inserted', btokens)]:
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                # emit the "-"/"+" marker at the start of each output line
                yield (prefix, label)
                nextisnewline = False
            # special handling line end
            isendofline = token.endswith('\n')
            if isendofline:
                chomp = token[:-1] # chomp
                if chomp.endswith('\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp):]
                token = chomp.rstrip() # detect spaces at the end
                endspaces = chomp[len(token):]
            # scan tabs
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = 'diff.tab'
                else:
                    if changed:
                        currentlabel = label + '.changed'
                    else:
                        currentlabel = label + '.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, 'diff.trailingwhitespace')
                yield (endofline, '')
                nextisnewline = True
2485 2485
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # pick the hunk tokenizer: inline word-diff coloring if requested
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    # - and + are handled by diffsinglehunk
                   ]
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []
    def consumehunkbuffer():
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            # track whether we are inside a file header; '@' ends a header,
            # any line not starting with diff-body characters begins one
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and not line.startswith((' ', '+', '-', '@', '\\')):
                    head = True
            diffline = False
            if not head and line and line.startswith(('+', '-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += "\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (line[len(stripline):],
                                   'diff.trailingwhitespace')
                        break
                else:
                    yield (line, '')
                if i + 1 < linecount:
                    yield ('\n', '')
    # flush any hunk still buffered at end of input
    for token in consumehunkbuffer():
        yield token
2556 2556
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # all arguments are forwarded unchanged to diff() via difflabel()
    return difflabel(diff, *args, **kw)
2560 2560
2561 2561 def _filepairs(modified, added, removed, copy, opts):
2562 2562 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2563 2563 before and f2 is the the name after. For added files, f1 will be None,
2564 2564 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2565 2565 or 'rename' (the latter two only if opts.git is set).'''
2566 2566 gone = set()
2567 2567
2568 2568 copyto = dict([(v, k) for k, v in copy.items()])
2569 2569
2570 2570 addedset, removedset = set(added), set(removed)
2571 2571
2572 2572 for f in sorted(modified + added + removed):
2573 2573 copyop = None
2574 2574 f1, f2 = f, f
2575 2575 if f in addedset:
2576 2576 f1 = None
2577 2577 if f in copy:
2578 2578 if opts.git:
2579 2579 f1 = copy[f]
2580 2580 if f1 in removedset and f1 not in gone:
2581 2581 copyop = 'rename'
2582 2582 gone.add(f1)
2583 2583 else:
2584 2584 copyop = 'copy'
2585 2585 elif f in removedset:
2586 2586 f2 = None
2587 2587 if opts.git:
2588 2588 # have we already reported a copy above?
2589 2589 if (f in copyto and copyto[f] in addedset
2590 2590 and copy[copyto[f]] == f):
2591 2591 continue
2592 2592 yield f1, f2, copyop
2593 2593
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # blob id as git computes it: sha1 over a "blob <len>\0" header
        # followed by the content
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = dateutil.datestr(ctx1.date())
    date2 = dateutil.datestr(ctx2.date())

    # git file-mode strings keyed by mercurial flag
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # plain diffs cannot express these changes; let the caller
            # decide whether to upgrade to git format or accept the loss
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        # fctx.is  | diffopts                | what to   | is fctx.data()
        # binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        # yes      | no   no       no   *    | summary   | no
        # yes      | no   no       yes  *    | base85    | yes
        # yes      | no   yes      no   *    | summary   | no
        # yes      | no   yes      yes  0    | summary   | no
        # yes      | no   yes      yes  >0   | summary   | semi [1]
        # yes      | yes  *        *    *    | text diff | yes
        # no       | *    *        *    *    | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2752 2752
def diffstatsum(stats):
    """Reduce (filename, adds, removes, isbinary) entries to the totals
    needed to lay out a diffstat: (maxname, maxtotal, addtotal,
    removetotal, hasbinary)."""
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2763 2763
def diffstatdata(lines):
    """Parse diff text into a list of (filename, adds, removes, isbinary)."""
    diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters for the current file, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader tracks whether a line is still part of the per-file diff
    # header, where '--'/'++' lines must not be counted as changes
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            # a new file diff begins: flush the previous one and reset
            addresult()
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
        elif line.startswith('rename from'):
            filename = line[12:]
        elif line.startswith('rename to'):
            filename += ' => %s' % line[10:]
    addresult()
    return results
2806 2806
def diffstat(lines, width=80):
    '''render a diff as a classic diffstat summary string

    Produces one " name | count ++--" row per file plus a trailing
    totals line, fitted to the given terminal width.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # the count column must be wide enough for 'Bin' entries
    countwidth = len(str(maxtotal))
    if hasbinary:
        countwidth = max(countwidth, 3)
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for name, added, removed, binary in stats:
        count = 'Bin' if binary else '%d' % (added + removed)
        padding = ' ' * (maxname - encoding.colwidth(name))
        output.append(' %s%s | %*s %s%s\n'
                      % (name, padding, countwidth, count,
                         '+' * scale(added), '-' * scale(removed)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2844 2844
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # header, totals, or binary rows carry no color label
            yield (line, '')
            continue
        name, graph = line.rsplit(' ', 1)
        yield (name + ' ', '')
        for pattern, label in ((br'\++', 'diffstat.inserted'),
                               (br'-+', 'diffstat.deleted')):
            m = re.search(pattern, graph)
            if m:
                yield (m.group(0), label)
    yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now