##// END OF EJS Templates
patch: pass in context objects into diffhunks() (API)...
Martin von Zweigbergk -
r41767:e834f6f6 default
parent child Browse files
Show More
@@ -1,815 +1,813 b''
1 # hgweb/webutil.py - utility library for the web interface.
1 # hgweb/webutil.py - utility library for the web interface.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import copy
11 import copy
12 import difflib
12 import difflib
13 import os
13 import os
14 import re
14 import re
15
15
16 from ..i18n import _
16 from ..i18n import _
17 from ..node import hex, nullid, short
17 from ..node import hex, nullid, short
18
18
19 from .common import (
19 from .common import (
20 ErrorResponse,
20 ErrorResponse,
21 HTTP_BAD_REQUEST,
21 HTTP_BAD_REQUEST,
22 HTTP_NOT_FOUND,
22 HTTP_NOT_FOUND,
23 paritygen,
23 paritygen,
24 )
24 )
25
25
26 from .. import (
26 from .. import (
27 context,
27 context,
28 diffutil,
28 diffutil,
29 error,
29 error,
30 match,
30 match,
31 mdiff,
31 mdiff,
32 obsutil,
32 obsutil,
33 patch,
33 patch,
34 pathutil,
34 pathutil,
35 pycompat,
35 pycompat,
36 scmutil,
36 scmutil,
37 templatefilters,
37 templatefilters,
38 templatekw,
38 templatekw,
39 templateutil,
39 templateutil,
40 ui as uimod,
40 ui as uimod,
41 util,
41 util,
42 )
42 )
43
43
44 from ..utils import (
44 from ..utils import (
45 stringutil,
45 stringutil,
46 )
46 )
47
47
# Archive formats hgweb can offer, in display order.
# Each entry maps type -> (content-type, archive spec name, file extension,
# unused placeholder).
archivespecs = util.sortdict((
    ('zip', ('application/zip', 'zip', '.zip', None)),
    ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
    ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)),
))
53
53
def archivelist(ui, nodeid, url=None):
    """Return a mappinglist describing the archive types enabled in config.

    An archive type is offered when it appears in web.allow-archive or its
    dedicated web.allow<type> knob is set.
    """
    allowed = ui.configlist('web', 'allow-archive', untrusted=True)

    def enabled(typ):
        # per-type boolean knob acts as a fallback to the allow-archive list
        return typ in allowed or ui.configbool('web', 'allow' + typ,
                                               untrusted=True)

    archives = [{'type': typ,
                 'extension': spec[2],
                 'node': nodeid,
                 'url': url}
                for typ, spec in archivespecs.iteritems() if enabled(typ)]
    return templateutil.mappinglist(archives)
69
69
def up(p):
    """Return the parent directory of path *p* with a trailing slash.

    The path is normalized to start with "/" and any trailing slash is
    dropped before taking the dirname; the root is returned as "/".
    """
    if p[0:1] != "/":
        p = "/" + p
    if p[-1:] == "/":
        p = p[:-1]
    parent = os.path.dirname(p)
    if parent == "/":
        return "/"
    return parent + "/"
79
79
80 def _navseq(step, firststep=None):
80 def _navseq(step, firststep=None):
81 if firststep:
81 if firststep:
82 yield firststep
82 yield firststep
83 if firststep >= 20 and firststep <= 40:
83 if firststep >= 20 and firststep <= 40:
84 firststep = 50
84 firststep = 50
85 yield firststep
85 yield firststep
86 assert step > 0
86 assert step > 0
87 assert firststep > 0
87 assert firststep > 0
88 while step <= firststep:
88 while step <= firststep:
89 step *= 10
89 step *= 10
90 while True:
90 while True:
91 yield 1 * step
91 yield 1 * step
92 yield 3 * step
92 yield 3 * step
93 step *= 10
93 step *= 10
94
94
class revnav(object):
    """Generator of changelog navigation links for a repository."""

    def __init__(self, repo):
        """Navigation generation object

        :repo: repo object we generate nav for
        """
        # used for hex generation
        self._revlog = repo.changelog

    def __nonzero__(self):
        """return True if any revision to navigate over"""
        return self._first() is not None

    # Python 3 spelling of the truth-value hook
    __bool__ = __nonzero__

    def _first(self):
        """return the minimum non-filtered changeset or None"""
        try:
            return next(iter(self._revlog))
        except StopIteration:
            # no visible revision at all (e.g. empty repository)
            return None

    def hex(self, rev):
        # hex node id used as the link target for revision *rev*
        return hex(self._revlog.node(rev))

    def gen(self, pos, pagelen, limit):
        """computes label and revision id for navigation link

        :pos: is the revision relative to which we generate navigation.
        :pagelen: the size of each navigation page
        :limit: how far shall we link

        The return is:
            - a single element mappinglist
            - containing a dictionary with a `before` and `after` key
            - values are dictionaries with `label` and `node` keys
        """
        if not self:
            # empty repo
            return templateutil.mappinglist([
                {'before': templateutil.mappinglist([]),
                 'after': templateutil.mappinglist([])},
            ])

        # candidate revisions at growing distances on both sides of pos
        targets = []
        for f in _navseq(1, pagelen):
            if f > limit:
                break
            targets.append(pos + f)
            targets.append(pos - f)
        targets.sort()

        first = self._first()
        navbefore = [{'label': '(%i)' % first, 'node': self.hex(first)}]
        navafter = []
        for rev in targets:
            if rev not in self._revlog:
                # skip filtered or out-of-range candidate revisions
                continue
            if pos < rev < limit:
                navafter.append({'label': '+%d' % abs(rev - pos),
                                 'node': self.hex(rev)})
            if 0 < rev < pos:
                navbefore.append({'label': '-%d' % abs(rev - pos),
                                  'node': self.hex(rev)})

        navafter.append({'label': 'tip', 'node': 'tip'})

        # TODO: maybe this can be a scalar object supporting tomap()
        return templateutil.mappinglist([
            {'before': templateutil.mappinglist(navbefore),
             'after': templateutil.mappinglist(navafter)},
        ])
168
168
class filerevnav(revnav):
    """revnav over a single file's filelog instead of the changelog."""

    def __init__(self, repo, path):
        """Navigation generation object

        :repo: repo object we generate nav for
        :path: path of the file we generate nav for
        """
        # used for iteration
        self._changelog = repo.unfiltered().changelog
        # used for hex generation
        self._revlog = repo.file(path)

    def hex(self, rev):
        # map the file revision back to its introducing changeset's hash
        return hex(self._changelog.node(self._revlog.linkrev(rev)))
184
184
# TODO: maybe this can be a wrapper class for changectx/filectx list, which
# yields {'ctx': ctx}
def _ctxsgen(context, ctxs):
    """Yield one template mapping per changectx/filectx in *ctxs*."""
    for c in ctxs:
        mapping = {
            'node': c.hex(),
            'rev': c.rev(),
            'user': c.user(),
            'date': c.date(),
            'description': c.description(),
            'branch': c.branch(),
        }
        # filectxs additionally expose the file path
        if util.safehasattr(c, 'path'):
            mapping['file'] = c.path()
        yield mapping
200
200
def _siblings(siblings=None, hiderev=None):
    """Return a mappinggenerator over *siblings*.

    The null revision is dropped, and a lone sibling whose revision equals
    *hiderev* is suppressed entirely.
    """
    contexts = [s for s in (siblings or []) if s.node() != nullid]
    if len(contexts) == 1 and contexts[0].rev() == hiderev:
        contexts = []
    return templateutil.mappinggenerator(_ctxsgen, args=(contexts,))
208
208
def difffeatureopts(req, ui, section):
    """Build diff options for *section*, honoring whitespace query params."""
    diffopts = diffutil.difffeatureopts(ui, untrusted=True, section=section,
                                        whitespace=True)

    # whitespace knobs may be overridden per request; an unparsable value
    # counts as enabling the option
    for name in ('ignorews', 'ignorewsamount', 'ignorewseol',
                 'ignoreblanklines'):
        raw = req.qsparams.get(name)
        if raw is None:
            continue
        parsed = stringutil.parsebool(raw)
        setattr(diffopts, name, parsed if parsed is not None else True)

    return diffopts
220
220
def annotate(req, fctx, ui):
    """Annotate *fctx*, following renames, with 'annotate'-section options."""
    return fctx.annotate(follow=True,
                         diffopts=difffeatureopts(req, ui, 'annotate'))
224
224
def parents(ctx, hide=None):
    """Return a mappinggenerator over ctx's parents.

    For a filectx whose changeset differs from the revision that introduced
    the file revision, the introducing changeset is reported instead.
    """
    if isinstance(ctx, context.basefilectx):
        introrev = ctx.introrev()
        if introrev != ctx.changectx().rev():
            return _siblings([ctx.repo()[introrev]], hide)
    return _siblings(ctx.parents(), hide)
231
231
def children(ctx, hide=None):
    """Return a mappinggenerator over ctx's children."""
    return _siblings(ctx.children(), hide)
234
234
def renamelink(fctx):
    """Return rename-source info for *fctx* as a (possibly empty) mappinglist."""
    rename = fctx.renamed()
    if not rename:
        return templateutil.mappinglist([])
    return templateutil.mappinglist([{'file': rename[0],
                                      'node': hex(rename[1])}])
240
240
def nodetagsdict(repo, node):
    """Return the tags on *node* as a hybridlist of names."""
    return templateutil.hybridlist(repo.nodetags(node), name='name')
243
243
def nodebookmarksdict(repo, node):
    """Return the bookmarks on *node* as a hybridlist of names."""
    return templateutil.hybridlist(repo.nodebookmarks(node), name='name')
246
246
def nodebranchdict(repo, ctx):
    """List ctx's branch name only when ctx is that branch's tip."""
    branch = ctx.branch()
    # If this is an empty repo, ctx.node() == nullid and ctx.branch() is
    # 'default'; branchtip() may then fail to resolve, which we treat as
    # "not the tip".
    try:
        branchnode = repo.branchtip(branch)
    except error.RepoLookupError:
        branchnode = None
    branches = [branch] if branchnode == ctx.node() else []
    return templateutil.hybridlist(branches, name='name')
259
259
def nodeinbranch(repo, ctx):
    """List ctx's branch unless it is 'default' or ctx is the branch tip."""
    branch = ctx.branch()
    try:
        branchnode = repo.branchtip(branch)
    except error.RepoLookupError:
        branchnode = None
    show = branch != 'default' and branchnode != ctx.node()
    return templateutil.hybridlist([branch] if show else [], name='name')
270
270
def nodebranchnodefault(ctx):
    """List ctx's branch name unless it is the 'default' branch."""
    branch = ctx.branch()
    branches = [] if branch == 'default' else [branch]
    return templateutil.hybridlist(branches, name='name')
277
277
278 def _nodenamesgen(context, f, node, name):
278 def _nodenamesgen(context, f, node, name):
279 for t in f(node):
279 for t in f(node):
280 yield {name: t}
280 yield {name: t}
281
281
def showtag(repo, t1, node=nullid):
    """Render template *t1* once per tag on *node*."""
    return templateutil.mappinggenerator(_nodenamesgen,
                                         args=(repo.nodetags, node, 'tag'),
                                         name=t1)
285
285
def showbookmark(repo, t1, node=nullid):
    """Render template *t1* once per bookmark on *node*."""
    return templateutil.mappinggenerator(
        _nodenamesgen, args=(repo.nodebookmarks, node, 'bookmark'), name=t1)
289
289
def branchentries(repo, stripecount, limit=0):
    """Return a lazy mappinggenerator of branch head entries.

    Open branches sort before closed ones, newest tip first; a *limit* > 0
    caps the number of entries emitted.
    """
    tips = []
    heads = repo.heads()
    parity = paritygen(stripecount)
    # (not closed, rev): open branches first, then by descending revision
    sortkey = lambda item: (not item[1], item[0].rev())

    def entries(context):
        count = 0
        if not tips:
            # populate lazily so repo work only happens when rendered
            for tag, hs, tip, closed in repo.branchmap().iterbranches():
                tips.append((repo[tip], closed))
        for ctx, closed in sorted(tips, key=sortkey, reverse=True):
            if limit > 0 and count >= limit:
                return
            count += 1
            if closed:
                status = 'closed'
            elif ctx.node() not in heads:
                status = 'inactive'
            else:
                status = 'open'
            yield {
                'parity': next(parity),
                'branch': ctx.branch(),
                'status': status,
                'node': ctx.hex(),
                'date': ctx.date()
            }

    return templateutil.mappinggenerator(entries)
320
320
def cleanpath(repo, path):
    """Canonicalize a user-supplied *path* relative to the repository root."""
    auditor = pathutil.pathauditor(repo.root, realfs=False)
    return pathutil.canonpath(repo.root, '', path.lstrip('/'),
                              auditor=auditor)
325
325
def changectx(repo, req):
    """Return the changectx named by the 'node' query parameter.

    Defaults to tip; for a 'base:node' range, the part after the colon is
    used.
    """
    changeid = req.qsparams.get('node', 'tip')
    ipos = changeid.find(':')
    if ipos != -1:
        changeid = changeid[ipos + 1:]
    return scmutil.revsymbol(repo, changeid)
335
335
def basechangectx(repo, req):
    """Return the base changectx of a 'base:node' query parameter, or None.

    None is returned when 'node' is absent or contains no colon.
    """
    if 'node' not in req.qsparams:
        return None
    changeid = req.qsparams['node']
    ipos = changeid.find(':')
    if ipos == -1:
        return None
    return scmutil.revsymbol(repo, changeid[:ipos])
345
345
def filectx(repo, req):
    """Return the filectx addressed by the 'file' plus 'node'/'filenode'
    query parameters.

    Raises ErrorResponse (404) when the required parameters are missing.
    """
    if 'file' not in req.qsparams:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = cleanpath(repo, req.qsparams['file'])
    if 'node' in req.qsparams:
        changeid = req.qsparams['node']
    elif 'filenode' in req.qsparams:
        changeid = req.qsparams['filenode']
    else:
        raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
    # prefer resolving as a changeset symbol; fall back to a file revision id
    try:
        return scmutil.revsymbol(repo, changeid)[path]
    except error.RepoError:
        return repo.filectx(path, fileid=changeid)
362
362
def linerange(req):
    """Parse the 'linerange' query parameter into a processed line range.

    Returns None when the parameter is absent; raises ErrorResponse (400)
    for duplicated or malformed values.
    """
    values = req.qsparams.getall('linerange')
    if not values:
        return None
    if len(values) > 1:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'redundant linerange parameter')
    try:
        fromline, toline = map(int, values[0].split(':', 1))
    except ValueError:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'invalid linerange parameter')
    try:
        return util.processlinerange(fromline, toline)
    except error.ParseError as exc:
        raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc))
379
379
def formatlinerange(fromline, toline):
    """Format a 0-based, end-exclusive line range as a 1-based 'from:to'."""
    return '%d:%d' % (fromline + 1, toline)
382
382
def _succsandmarkersgen(context, mapping):
    """Yield successors-and-markers items with templatable successors."""
    repo = context.resource(mapping, 'repo')
    itemmappings = templatekw.showsuccsandmarkers(context, mapping)
    for item in itemmappings.tovalue(context, mapping):
        # replace raw successor nodes with sibling context mappings
        item['successors'] = _siblings(repo[successor]
                                       for successor in item['successors'])
        yield item
390
390
def succsandmarkers(context, mapping):
    """Template keyword: obsolescence successors and markers for ctx."""
    return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,))

# teach templater succsandmarkers is switched to (context, mapping) API
succsandmarkers._requires = {'repo', 'ctx'}
396
396
def _whyunstablegen(context, mapping):
    """Yield instability explanations, wrapping divergent nodes for
    templating."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')

    for entry in obsutil.whyunstable(repo, ctx):
        if entry.get('divergentnodes'):
            entry['divergentnodes'] = _siblings(entry['divergentnodes'])
        yield entry
406
406
def whyunstable(context, mapping):
    """Template keyword: explanations of why ctx is unstable."""
    return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,))

# new-style template keyword API: takes (context, mapping)
whyunstable._requires = {'repo', 'ctx'}
411
411
# helper to mark a function as a new-style template keyword; can be removed
# once old-style function gets unsupported and new-style becomes the default
def _kwfunc(f):
    """Tag *f* as taking the (context, mapping) keyword signature."""
    f._requires = ()
    return f
417
417
def commonentry(repo, ctx):
    """Return the template mapping shared by all changeset-displaying views."""
    node = scmutil.binnode(ctx)
    return {
        # TODO: perhaps ctx.changectx() should be assigned if ctx is a
        # filectx, but I'm not pretty sure if that would always work because
        # fctx.parents() != fctx.changectx.parents() for example.
        'ctx': ctx,
        'rev': ctx.rev(),
        'node': hex(node),
        'author': ctx.user(),
        'desc': ctx.description(),
        'date': ctx.date(),
        'extra': ctx.extra(),
        'phase': ctx.phasestr(),
        'obsolete': ctx.obsolete(),
        'succsandmarkers': succsandmarkers,
        'instabilities': templateutil.hybridlist(ctx.instabilities(),
                                                 name='instability'),
        'whyunstable': whyunstable,
        'branch': nodebranchnodefault(ctx),
        'inbranch': nodeinbranch(repo, ctx),
        'branches': nodebranchdict(repo, ctx),
        'tags': nodetagsdict(repo, node),
        'bookmarks': nodebookmarksdict(repo, node),
        # lazy keywords: parents/children are only computed when rendered
        'parent': _kwfunc(lambda context, mapping: parents(ctx)),
        'child': _kwfunc(lambda context, mapping: children(ctx)),
    }
445
445
def changelistentry(web, ctx):
    '''Obtain a dictionary to be used for entries in a changelist.

    This function is called when producing items for the "entries" list passed
    to the "shortlog" and "changelog" templates.
    '''
    repo = web.repo
    rev = ctx.rev()
    node = scmutil.binnode(ctx)

    entry = commonentry(repo, ctx)
    entry.update({
        'allparents': _kwfunc(lambda context, mapping: parents(ctx)),
        # hide a parent/child that is just the adjacent revision
        'parent': _kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
        'child': _kwfunc(lambda context, mapping: children(ctx, rev + 1)),
        'changelogtag': showtag(repo, 'changelogtag', node),
        'files': listfilediffs(ctx.files(), node, web.maxfiles),
    })
    return entry
467
467
def changelistentries(web, revs, maxcount, parityfn):
    """Emit up to N records for an iterable of revisions."""
    repo = web.repo

    for emitted, rev in enumerate(revs):
        if emitted >= maxcount:
            break

        entry = changelistentry(web, repo[rev])
        entry['parity'] = next(parityfn)

        yield entry
483
483
def symrevorshortnode(req, ctx):
    """Return the escaped 'node' query parameter, or ctx's short hash."""
    if 'node' in req.qsparams:
        return templatefilters.revescape(req.qsparams['node'])
    return short(scmutil.binnode(ctx))
489
489
def _listfilesgen(context, ctx, stripecount):
    """Render one filenodelink/filenolink template per file touched by ctx."""
    parity = paritygen(stripecount)
    for blockno, f in enumerate(ctx.files()):
        # files absent from ctx (e.g. removed) get the link-less template
        if f in ctx:
            template = 'filenodelink'
        else:
            template = 'filenolink'
        yield context.process(template, {
            'node': ctx.hex(),
            'file': f,
            'blockno': blockno + 1,
            'parity': next(parity),
        })
500
500
def changesetentry(web, ctx):
    '''Obtain a dictionary to be used to render the "changeset" template.'''

    showtags = showtag(web.repo, 'changesettag', scmutil.binnode(ctx))
    showbookmarks = showbookmark(web.repo, 'changesetbookmark',
                                 scmutil.binnode(ctx))
    showbranch = nodebranchnodefault(ctx)

    # diff against an explicitly requested base revision when given,
    # otherwise against the first parent
    basectx = basechangectx(web.repo, web.req)
    if basectx is None:
        basectx = ctx.p1()

    # per-request style override
    style = web.config('web', 'style')
    if 'style' in web.req.qsparams:
        style = web.req.qsparams['style']

    diff = diffs(web, ctx, basectx, None, style)

    parity = paritygen(web.stripecount)
    diffstatsgen = diffstatgen(web.repo.ui, ctx, basectx)
    diffstats = diffstat(ctx, diffstatsgen, parity)

    return dict(
        diff=diff,
        symrev=symrevorshortnode(web.req, ctx),
        basenode=basectx.hex(),
        changesettag=showtags,
        changesetbookmark=showbookmarks,
        changesetbranch=showbranch,
        files=templateutil.mappedgenerator(_listfilesgen,
                                           args=(ctx, web.stripecount)),
        diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)),
        diffstat=diffstats,
        archives=web.archivelist(ctx.hex()),
        **pycompat.strkwargs(commonentry(web.repo, ctx)))
536
536
def _listfilediffsgen(context, files, node, max):
    """Render a filedifflink per file up to *max*, then an ellipsis marker."""
    for f in files[:max]:
        yield context.process('filedifflink', {'node': hex(node), 'file': f})
    if len(files) > max:
        yield context.process('fileellipses', {})
542
542
def listfilediffs(files, node, max):
    """Return a mappedgenerator of file-diff links, truncated at *max*."""
    return templateutil.mappedgenerator(_listfilediffsgen,
                                        args=(files, node, max))
546
546
547 def _prettyprintdifflines(context, lines, blockno, lineidprefix):
547 def _prettyprintdifflines(context, lines, blockno, lineidprefix):
548 for lineno, l in enumerate(lines, 1):
548 for lineno, l in enumerate(lines, 1):
549 difflineno = "%d.%d" % (blockno, lineno)
549 difflineno = "%d.%d" % (blockno, lineno)
550 if l.startswith('+'):
550 if l.startswith('+'):
551 ltype = "difflineplus"
551 ltype = "difflineplus"
552 elif l.startswith('-'):
552 elif l.startswith('-'):
553 ltype = "difflineminus"
553 ltype = "difflineminus"
554 elif l.startswith('@'):
554 elif l.startswith('@'):
555 ltype = "difflineat"
555 ltype = "difflineat"
556 else:
556 else:
557 ltype = "diffline"
557 ltype = "diffline"
558 yield context.process(ltype, {
558 yield context.process(ltype, {
559 'line': l,
559 'line': l,
560 'lineno': lineno,
560 'lineno': lineno,
561 'lineid': lineidprefix + "l%s" % difflineno,
561 'lineid': lineidprefix + "l%s" % difflineno,
562 'linenumber': "% 8s" % difflineno,
562 'linenumber': "% 8s" % difflineno,
563 })
563 })
564
564
def _diffsgen(context, repo, ctx, basectx, files, style, stripecount,
              linerange, lineidprefix):
    """Yield one 'diffblock' mapping per file diff of ctx against basectx.

    *files*, when non-empty, restricts the diff to exactly those paths.
    *linerange*, when given, drops hunks whose post-image range does not
    intersect it (hunks with no range info, e.g. binary, are kept).
    Header lines keep their 'diff --git' line only in raw style.
    """
    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    parity = paritygen(stripecount)

    # diffhunks() takes the changectx objects directly (new API); the
    # old node1/node2 extraction is gone.
    diffhunks = patch.diffhunks(repo, basectx, ctx, m, opts=diffopts)
    for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
        if style != 'raw':
            header = header[1:]
        lines = [h + '\n' for h in header]
        for hunkrange, hunklines in hunks:
            if linerange is not None and hunkrange is not None:
                s1, l1, s2, l2 = hunkrange
                if not mdiff.hunkinrange((s2, l2), linerange):
                    continue
            lines.extend(hunklines)
        if lines:
            l = templateutil.mappedgenerator(_prettyprintdifflines,
                                             args=(lines, blockno,
                                                   lineidprefix))
            yield {
                'parity': next(parity),
                'blockno': blockno,
                'lines': l,
            }
597
595
def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=''):
    """Return a 'diffblock' mapping generator for ctx against basectx."""
    genargs = (web.repo, ctx, basectx, files, style, web.stripecount,
               linerange, lineidprefix)
    return templateutil.mappinggenerator(_diffsgen, args=genargs,
                                         name='diffblock')
602
600
603 def _compline(type, leftlineno, leftline, rightlineno, rightline):
601 def _compline(type, leftlineno, leftline, rightlineno, rightline):
604 lineid = leftlineno and ("l%d" % leftlineno) or ''
602 lineid = leftlineno and ("l%d" % leftlineno) or ''
605 lineid += rightlineno and ("r%d" % rightlineno) or ''
603 lineid += rightlineno and ("r%d" % rightlineno) or ''
606 llno = '%d' % leftlineno if leftlineno else ''
604 llno = '%d' % leftlineno if leftlineno else ''
607 rlno = '%d' % rightlineno if rightlineno else ''
605 rlno = '%d' % rightlineno if rightlineno else ''
608 return {
606 return {
609 'type': type,
607 'type': type,
610 'lineid': lineid,
608 'lineid': lineid,
611 'leftlineno': leftlineno,
609 'leftlineno': leftlineno,
612 'leftlinenumber': "% 6s" % llno,
610 'leftlinenumber': "% 6s" % llno,
613 'leftline': leftline or '',
611 'leftline': leftline or '',
614 'rightlineno': rightlineno,
612 'rightlineno': rightlineno,
615 'rightlinenumber': "% 6s" % rlno,
613 'rightlinenumber': "% 6s" % rlno,
616 'rightline': rightline or '',
614 'rightline': rightline or '',
617 }
615 }
618
616
def _getcompblockgen(context, leftlines, rightlines, opcodes):
    """Yield comparison-line mappings for a SequenceMatcher opcode list.

    Lines present on both sides are paired; whichever side is longer
    then contributes one-sided lines for its remainder.
    """
    for optype, llo, lhi, rlo, rhi in opcodes:
        optype = pycompat.sysbytes(optype)
        common = min(lhi - llo, rhi - rlo)
        # paired lines present on both sides
        for offset in pycompat.xrange(common):
            yield _compline(type=optype,
                            leftlineno=llo + offset + 1,
                            leftline=leftlines[llo + offset],
                            rightlineno=rlo + offset + 1,
                            rightline=rightlines[rlo + offset])
        # left-only remainder (empty range when right side is longer)
        for idx in pycompat.xrange(llo + common, lhi):
            yield _compline(type=optype,
                            leftlineno=idx + 1,
                            leftline=leftlines[idx],
                            rightlineno=None,
                            rightline=None)
        # right-only remainder (empty range when left side is longer)
        for idx in pycompat.xrange(rlo + common, rhi):
            yield _compline(type=optype,
                            leftlineno=None,
                            leftline=None,
                            rightlineno=idx + 1,
                            rightline=rightlines[idx])
645
643
def _getcompblock(leftlines, rightlines, opcodes):
    """Wrap _getcompblockgen in a 'comparisonline' mapping generator."""
    return templateutil.mappinggenerator(
        _getcompblockgen, args=(leftlines, rightlines, opcodes),
        name='comparisonline')
650
648
def _comparegen(context, contextnum, leftlines, rightlines):
    '''Generator function that provides side-by-side comparison data.'''
    matcher = difflib.SequenceMatcher(None, leftlines, rightlines)
    if contextnum < 0:
        # negative context: one block covering the whole comparison
        yield {'lines': _getcompblock(leftlines, rightlines,
                                      matcher.get_opcodes())}
    else:
        # otherwise one block per hunk with `contextnum` context lines
        for opcodegroup in matcher.get_grouped_opcodes(n=contextnum):
            yield {'lines': _getcompblock(leftlines, rightlines,
                                          opcodegroup)}
661
659
def compare(contextnum, leftlines, rightlines):
    """Return a 'comparisonblock' mapping generator for the compare view."""
    return templateutil.mappinggenerator(
        _comparegen, args=(contextnum, leftlines, rightlines),
        name='comparisonblock')
666
664
def diffstatgen(ui, ctx, basectx):
    '''Generator function that provides the diffstat data.'''
    diffopts = patch.diffopts(ui, {'noprefix': False})
    difflines = util.iterlines(ctx.diff(basectx, opts=diffopts))
    stats = patch.diffstatdata(difflines)
    maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
    # computed once, then yielded forever so multiple consumers can
    # each pull an identical copy without redoing the diff
    while True:
        yield stats, maxname, maxtotal, addtotal, removetotal, binary
676
674
def diffsummary(statgen):
    '''Return a short summary of the diff.'''
    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    summary = _(' %d files changed, %d insertions(+), %d deletions(-)\n')
    return summary % (len(stats), addtotal, removetotal)
683
681
684 def _diffstattmplgen(context, ctx, statgen, parity):
682 def _diffstattmplgen(context, ctx, statgen, parity):
685 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
683 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
686 files = ctx.files()
684 files = ctx.files()
687
685
688 def pct(i):
686 def pct(i):
689 if maxtotal == 0:
687 if maxtotal == 0:
690 return 0
688 return 0
691 return (float(i) / maxtotal) * 100
689 return (float(i) / maxtotal) * 100
692
690
693 fileno = 0
691 fileno = 0
694 for filename, adds, removes, isbinary in stats:
692 for filename, adds, removes, isbinary in stats:
695 template = 'diffstatlink' if filename in files else 'diffstatnolink'
693 template = 'diffstatlink' if filename in files else 'diffstatnolink'
696 total = adds + removes
694 total = adds + removes
697 fileno += 1
695 fileno += 1
698 yield context.process(template, {
696 yield context.process(template, {
699 'node': ctx.hex(),
697 'node': ctx.hex(),
700 'file': filename,
698 'file': filename,
701 'fileno': fileno,
699 'fileno': fileno,
702 'total': total,
700 'total': total,
703 'addpct': pct(adds),
701 'addpct': pct(adds),
704 'removepct': pct(removes),
702 'removepct': pct(removes),
705 'parity': next(parity),
703 'parity': next(parity),
706 })
704 })
707
705
def diffstat(ctx, statgen, parity):
    '''Return a diffstat template for each file in the diff.'''
    return templateutil.mappedgenerator(_diffstattmplgen,
                                        args=(ctx, statgen, parity))
712
710
class sessionvars(templateutil.wrapped):
    """Template wrapper around the session's URL query variables.

    Renders as a sequence of {name, value, separator} mappings, the
    first separator being *start* ('?' by default) and '&' thereafter,
    so templates can rebuild a query string.
    """
    def __init__(self, vars, start='?'):
        self._start = start
        self._vars = vars

    def __getitem__(self, key):
        return self._vars[key]

    def __setitem__(self, key, value):
        self._vars[key] = value

    def __copy__(self):
        return sessionvars(copy.copy(self._vars), self._start)

    def contains(self, context, mapping, item):
        item = templateutil.unwrapvalue(context, mapping, item)
        return item in self._vars

    def getmember(self, context, mapping, key):
        key = templateutil.unwrapvalue(context, mapping, key)
        return self._vars.get(key)

    def getmin(self, context, mapping):
        raise error.ParseError(_('not comparable'))

    def getmax(self, context, mapping):
        raise error.ParseError(_('not comparable'))

    def filter(self, context, mapping, select):
        # implement if necessary
        raise error.ParseError(_('not filterable'))

    def itermaps(self, context):
        separator = self._start
        for key, value in sorted(self._vars.iteritems()):
            yield {'name': key,
                   'value': pycompat.bytestr(value),
                   'separator': separator,
                   }
            separator = '&'

    def join(self, context, mapping, sep):
        # could be '{separator}{name}={value|urlescape}'
        raise error.ParseError(_('not displayable without template'))

    def show(self, context, mapping):
        # fix: join() takes (context, mapping, sep); passing only
        # (context, '') raised TypeError instead of the intended
        # ParseError
        return self.join(context, mapping, '')

    def tobool(self, context, mapping):
        return bool(self._vars)

    def tovalue(self, context, mapping):
        return self._vars
766
764
class wsgiui(uimod.ui):
    """ui variant for WSGI serving.

    Terminal-width probing breaks under mod_wsgi, so a fixed width is
    reported instead.
    """
    def termwidth(self):
        return 80
771
769
def getwebsubs(repo):
    """Parse [websub] (and legacy [interhg]) config into a rewrite table.

    Each entry is an 's<delim>regexp<delim>format<delim>flags' pattern;
    the return value is a list of (compiled_regexp, format) pairs.
    Invalid entries are warned about and skipped.
    """
    websubtable = []
    websubdefs = repo.ui.configitems('websub')
    # we must maintain interhg backwards compatibility
    websubdefs += repo.ui.configitems('interhg')
    for key, pattern in websubdefs:
        # the delimiter is whatever single character follows the "s"
        rawdelim = pattern[1:2]
        delim = stringutil.reescape(rawdelim)

        # carve the pattern into regexp, replacement format and flags,
        # taking care not to split on escaped delimiters; format and
        # flags are optional but all delimiters are required
        parsed = re.match(
            br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
            % (delim, delim, delim), pattern)
        if not parsed:
            repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
                         % (key, pattern))
            continue

        # unescape the delimiter inside both the regexp and the format
        unescape_re = re.compile(br'(?<!\\)\\%s' % delim)
        regexp = unescape_re.sub(rawdelim, parsed.group(1))
        format = unescape_re.sub(rawdelim, parsed.group(2))

        # apply any of the six optional regexp flags
        flagtext = parsed.group(3)
        reflags = 0
        if flagtext:
            for flag in flagtext.upper():
                reflags |= re.__dict__[flag]

        try:
            websubtable.append((re.compile(regexp, reflags), format))
        except re.error:
            repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
                         % (key, regexp))
    return websubtable
812
810
def getgraphnode(repo, ctx):
    """Return the graph glyph for ctx: current-marker plus node symbol."""
    current = templatekw.getgraphnodecurrent(repo, ctx)
    symbol = templatekw.getgraphnodesymbol(ctx)
    return current + symbol
@@ -1,2862 +1,2862 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 diffhelper,
30 diffhelper,
31 diffutil,
31 diffutil,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 match as matchmod,
35 match as matchmod,
36 mdiff,
36 mdiff,
37 pathutil,
37 pathutil,
38 pycompat,
38 pycompat,
39 scmutil,
39 scmutil,
40 similar,
40 similar,
41 util,
41 util,
42 vfs as vfsmod,
42 vfs as vfsmod,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 dateutil,
45 dateutil,
46 procutil,
46 procutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
# file-like string buffer used for in-memory patch chunks
stringio = util.stringio

# matches the "diff --git a/... b/..." header of a git-style diff
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and non-tabs
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# tokenizes a line for word diffs: whitespace runs, identifier-like
# runs (including 8-bit bytes), or single other characters
wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
                          b'[^ \ta-zA-Z0-9_\x80-\xff])')

# re-exported for convenience; the class lives in the error module
PatchError = error.PatchError
58
58
59 # public functions
59 # public functions
60
60
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # "Key: value" header detection, tolerant of folded
        # continuation lines while already inside a header block
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # materialize one patch as a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers (hg export output)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split on mbox "From " separators; recurse so each message can
        # itself be any of the supported container formats
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            # flatten a message (or part) back into a file-like object
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        # consume the remainder of the input before parsing as email
        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            # only textual parts can carry a patch
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split on the start of each plain "Key: value" header block
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no recognizable structure: the whole input is one patch
        yield chunk(cur)

    class fiter(object):
        # adapt an object that only has readline() (e.g. an http
        # response) to the iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the stream line by line until one of the known container
    # formats is recognized, then hand off to the matching splitter
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
189
189
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# consumed by _extract() when reading "# <Header> <value>" lines from
# hg-export style patch headers
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
196
196
@contextlib.contextmanager
def extract(ui, fileobj):
    """Context manager extracting a patch from the data in *fileobj*.

    The input may be a bare patch or a patch contained in an email
    message.  Yields a dictionary whose standard keys are filename,
    message, user, date, branch, node, p1 and p2; any of them may be
    missing.  A missing filename means fileobj contained no patch.
    The temporary patch file is cleaned up when the context exits.
    """
    fd, patchname = pycompat.mkstemp(prefix='hg-patch-')
    patchfp = os.fdopen(fd, r'wb')
    try:
        yield _extract(ui, fileobj, patchname, patchfp)
    finally:
        patchfp.close()
        os.unlink(patchname)
222
222
def _extract(ui, fileobj, tmpname, tmpfp):
    """Worker for extract(): parse *fileobj*, write the diff to *tmpfp*.

    Returns the metadata dictionary documented in extract(); the caller
    owns the temporary file *tmpname* and the open handle *tmpfp*.
    """

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}

    msg = mail.parse(fileobj)

    # prefer real email headers for subject/user when present
    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data['user']:
        # Not an email, restore parsed headers if any
        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                            for h in msg.items()) + '\n'

    # should try to parse msg['Date']
    parents = []

    if subject:
        if subject.startswith('[PATCH'):
            # strip a leading "[PATCH n/m]" tag from the subject
            pend = subject.find(']')
            if pend >= 0:
                subject = subject[pend + 1:].lstrip()
        subject = re.sub(br'\n[ \t]+', ' ', subject)
        ui.debug('Subject: %s\n' % subject)
    if data['user']:
        ui.debug('From: %s\n' % data['user'])
    diffs_seen = 0
    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
    message = ''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug('Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            # text before the diff start is candidate commit message
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug('found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[:m.start(0)].splitlines():
                if line.startswith('# HG changeset patch') and not hgpatch:
                    ui.debug('patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    # consume "# Key value" metadata lines
                    if line.startswith('# User '):
                        data['user'] = line[7:]
                        ui.debug('From: %s\n' % data['user'])
                    elif line.startswith("# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith("# "):
                        for header, key in patchheadermap:
                            prefix = '# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix):]
                    else:
                        hgpatchheader = False
                elif line == '---':
                    # conventional "ignore everything below" marker
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write('\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith('\n'):
                    tmpfp.write('\n')
        elif not diffs_seen and message and content_type == 'text/plain':
            # plain-text part seen before any diff extends the message
            message += '\n' + payload

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname

    return data
322
322
323 class patchmeta(object):
323 class patchmeta(object):
324 """Patched file metadata
324 """Patched file metadata
325
325
326 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
326 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
327 or COPY. 'path' is patched file path. 'oldpath' is set to the
327 or COPY. 'path' is patched file path. 'oldpath' is set to the
328 origin file when 'op' is either COPY or RENAME, None otherwise. If
328 origin file when 'op' is either COPY or RENAME, None otherwise. If
329 file mode is changed, 'mode' is a tuple (islink, isexec) where
329 file mode is changed, 'mode' is a tuple (islink, isexec) where
330 'islink' is True if the file is a symlink and 'isexec' is True if
330 'islink' is True if the file is a symlink and 'isexec' is True if
331 the file is executable. Otherwise, 'mode' is None.
331 the file is executable. Otherwise, 'mode' is None.
332 """
332 """
333 def __init__(self, path):
333 def __init__(self, path):
334 self.path = path
334 self.path = path
335 self.oldpath = None
335 self.oldpath = None
336 self.mode = None
336 self.mode = None
337 self.op = 'MODIFY'
337 self.op = 'MODIFY'
338 self.binary = False
338 self.binary = False
339
339
340 def setmode(self, mode):
340 def setmode(self, mode):
341 islink = mode & 0o20000
341 islink = mode & 0o20000
342 isexec = mode & 0o100
342 isexec = mode & 0o100
343 self.mode = (islink, isexec)
343 self.mode = (islink, isexec)
344
344
345 def copy(self):
345 def copy(self):
346 other = patchmeta(self.path)
346 other = patchmeta(self.path)
347 other.oldpath = self.oldpath
347 other.oldpath = self.oldpath
348 other.mode = self.mode
348 other.mode = self.mode
349 other.op = self.op
349 other.op = self.op
350 other.binary = self.binary
350 other.binary = self.binary
351 return other
351 return other
352
352
353 def _ispatchinga(self, afile):
353 def _ispatchinga(self, afile):
354 if afile == '/dev/null':
354 if afile == '/dev/null':
355 return self.op == 'ADD'
355 return self.op == 'ADD'
356 return afile == 'a/' + (self.oldpath or self.path)
356 return afile == 'a/' + (self.oldpath or self.path)
357
357
358 def _ispatchingb(self, bfile):
358 def _ispatchingb(self, bfile):
359 if bfile == '/dev/null':
359 if bfile == '/dev/null':
360 return self.op == 'DELETE'
360 return self.op == 'DELETE'
361 return bfile == 'b/' + self.path
361 return bfile == 'b/' + self.path
362
362
363 def ispatching(self, afile, bfile):
363 def ispatching(self, afile, bfile):
364 return self._ispatchinga(afile) and self._ispatchingb(bfile)
364 return self._ispatchinga(afile) and self._ispatchingb(bfile)
365
365
366 def __repr__(self):
366 def __repr__(self):
367 return "<patchmeta %s %r>" % (self.op, self.path)
367 return "<patchmeta %s %r>" % (self.op, self.path)
368
368
369 def readgitpatch(lr):
369 def readgitpatch(lr):
370 """extract git-style metadata about patches from <patchname>"""
370 """extract git-style metadata about patches from <patchname>"""
371
371
372 # Filter patch for git information
372 # Filter patch for git information
373 gp = None
373 gp = None
374 gitpatches = []
374 gitpatches = []
375 for line in lr:
375 for line in lr:
376 line = line.rstrip(' \r\n')
376 line = line.rstrip(' \r\n')
377 if line.startswith('diff --git a/'):
377 if line.startswith('diff --git a/'):
378 m = gitre.match(line)
378 m = gitre.match(line)
379 if m:
379 if m:
380 if gp:
380 if gp:
381 gitpatches.append(gp)
381 gitpatches.append(gp)
382 dst = m.group(2)
382 dst = m.group(2)
383 gp = patchmeta(dst)
383 gp = patchmeta(dst)
384 elif gp:
384 elif gp:
385 if line.startswith('--- '):
385 if line.startswith('--- '):
386 gitpatches.append(gp)
386 gitpatches.append(gp)
387 gp = None
387 gp = None
388 continue
388 continue
389 if line.startswith('rename from '):
389 if line.startswith('rename from '):
390 gp.op = 'RENAME'
390 gp.op = 'RENAME'
391 gp.oldpath = line[12:]
391 gp.oldpath = line[12:]
392 elif line.startswith('rename to '):
392 elif line.startswith('rename to '):
393 gp.path = line[10:]
393 gp.path = line[10:]
394 elif line.startswith('copy from '):
394 elif line.startswith('copy from '):
395 gp.op = 'COPY'
395 gp.op = 'COPY'
396 gp.oldpath = line[10:]
396 gp.oldpath = line[10:]
397 elif line.startswith('copy to '):
397 elif line.startswith('copy to '):
398 gp.path = line[8:]
398 gp.path = line[8:]
399 elif line.startswith('deleted file'):
399 elif line.startswith('deleted file'):
400 gp.op = 'DELETE'
400 gp.op = 'DELETE'
401 elif line.startswith('new file mode '):
401 elif line.startswith('new file mode '):
402 gp.op = 'ADD'
402 gp.op = 'ADD'
403 gp.setmode(int(line[-6:], 8))
403 gp.setmode(int(line[-6:], 8))
404 elif line.startswith('new mode '):
404 elif line.startswith('new mode '):
405 gp.setmode(int(line[-6:], 8))
405 gp.setmode(int(line[-6:], 8))
406 elif line.startswith('GIT binary patch'):
406 elif line.startswith('GIT binary patch'):
407 gp.binary = True
407 gp.binary = True
408 if gp:
408 if gp:
409 gitpatches.append(gp)
409 gitpatches.append(gp)
410
410
411 return gitpatches
411 return gitpatches
412
412
413 class linereader(object):
413 class linereader(object):
414 # simple class to allow pushing lines back into the input stream
414 # simple class to allow pushing lines back into the input stream
415 def __init__(self, fp):
415 def __init__(self, fp):
416 self.fp = fp
416 self.fp = fp
417 self.buf = []
417 self.buf = []
418
418
419 def push(self, line):
419 def push(self, line):
420 if line is not None:
420 if line is not None:
421 self.buf.append(line)
421 self.buf.append(line)
422
422
423 def readline(self):
423 def readline(self):
424 if self.buf:
424 if self.buf:
425 l = self.buf[0]
425 l = self.buf[0]
426 del self.buf[0]
426 del self.buf[0]
427 return l
427 return l
428 return self.fp.readline()
428 return self.fp.readline()
429
429
430 def __iter__(self):
430 def __iter__(self):
431 return iter(self.readline, '')
431 return iter(self.readline, '')
432
432
433 class abstractbackend(object):
433 class abstractbackend(object):
434 def __init__(self, ui):
434 def __init__(self, ui):
435 self.ui = ui
435 self.ui = ui
436
436
437 def getfile(self, fname):
437 def getfile(self, fname):
438 """Return target file data and flags as a (data, (islink,
438 """Return target file data and flags as a (data, (islink,
439 isexec)) tuple. Data is None if file is missing/deleted.
439 isexec)) tuple. Data is None if file is missing/deleted.
440 """
440 """
441 raise NotImplementedError
441 raise NotImplementedError
442
442
443 def setfile(self, fname, data, mode, copysource):
443 def setfile(self, fname, data, mode, copysource):
444 """Write data to target file fname and set its mode. mode is a
444 """Write data to target file fname and set its mode. mode is a
445 (islink, isexec) tuple. If data is None, the file content should
445 (islink, isexec) tuple. If data is None, the file content should
446 be left unchanged. If the file is modified after being copied,
446 be left unchanged. If the file is modified after being copied,
447 copysource is set to the original file name.
447 copysource is set to the original file name.
448 """
448 """
449 raise NotImplementedError
449 raise NotImplementedError
450
450
451 def unlink(self, fname):
451 def unlink(self, fname):
452 """Unlink target file."""
452 """Unlink target file."""
453 raise NotImplementedError
453 raise NotImplementedError
454
454
455 def writerej(self, fname, failed, total, lines):
455 def writerej(self, fname, failed, total, lines):
456 """Write rejected lines for fname. total is the number of hunks
456 """Write rejected lines for fname. total is the number of hunks
457 which failed to apply and total the total number of hunks for this
457 which failed to apply and total the total number of hunks for this
458 files.
458 files.
459 """
459 """
460
460
461 def exists(self, fname):
461 def exists(self, fname):
462 raise NotImplementedError
462 raise NotImplementedError
463
463
464 def close(self):
464 def close(self):
465 raise NotImplementedError
465 raise NotImplementedError
466
466
467 class fsbackend(abstractbackend):
467 class fsbackend(abstractbackend):
468 def __init__(self, ui, basedir):
468 def __init__(self, ui, basedir):
469 super(fsbackend, self).__init__(ui)
469 super(fsbackend, self).__init__(ui)
470 self.opener = vfsmod.vfs(basedir)
470 self.opener = vfsmod.vfs(basedir)
471
471
472 def getfile(self, fname):
472 def getfile(self, fname):
473 if self.opener.islink(fname):
473 if self.opener.islink(fname):
474 return (self.opener.readlink(fname), (True, False))
474 return (self.opener.readlink(fname), (True, False))
475
475
476 isexec = False
476 isexec = False
477 try:
477 try:
478 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
478 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
479 except OSError as e:
479 except OSError as e:
480 if e.errno != errno.ENOENT:
480 if e.errno != errno.ENOENT:
481 raise
481 raise
482 try:
482 try:
483 return (self.opener.read(fname), (False, isexec))
483 return (self.opener.read(fname), (False, isexec))
484 except IOError as e:
484 except IOError as e:
485 if e.errno != errno.ENOENT:
485 if e.errno != errno.ENOENT:
486 raise
486 raise
487 return None, None
487 return None, None
488
488
489 def setfile(self, fname, data, mode, copysource):
489 def setfile(self, fname, data, mode, copysource):
490 islink, isexec = mode
490 islink, isexec = mode
491 if data is None:
491 if data is None:
492 self.opener.setflags(fname, islink, isexec)
492 self.opener.setflags(fname, islink, isexec)
493 return
493 return
494 if islink:
494 if islink:
495 self.opener.symlink(data, fname)
495 self.opener.symlink(data, fname)
496 else:
496 else:
497 self.opener.write(fname, data)
497 self.opener.write(fname, data)
498 if isexec:
498 if isexec:
499 self.opener.setflags(fname, False, True)
499 self.opener.setflags(fname, False, True)
500
500
501 def unlink(self, fname):
501 def unlink(self, fname):
502 rmdir = self.ui.configbool('experimental', 'removeemptydirs')
502 rmdir = self.ui.configbool('experimental', 'removeemptydirs')
503 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
503 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
504
504
505 def writerej(self, fname, failed, total, lines):
505 def writerej(self, fname, failed, total, lines):
506 fname = fname + ".rej"
506 fname = fname + ".rej"
507 self.ui.warn(
507 self.ui.warn(
508 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
508 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
509 (failed, total, fname))
509 (failed, total, fname))
510 fp = self.opener(fname, 'w')
510 fp = self.opener(fname, 'w')
511 fp.writelines(lines)
511 fp.writelines(lines)
512 fp.close()
512 fp.close()
513
513
514 def exists(self, fname):
514 def exists(self, fname):
515 return self.opener.lexists(fname)
515 return self.opener.lexists(fname)
516
516
517 class workingbackend(fsbackend):
517 class workingbackend(fsbackend):
518 def __init__(self, ui, repo, similarity):
518 def __init__(self, ui, repo, similarity):
519 super(workingbackend, self).__init__(ui, repo.root)
519 super(workingbackend, self).__init__(ui, repo.root)
520 self.repo = repo
520 self.repo = repo
521 self.similarity = similarity
521 self.similarity = similarity
522 self.removed = set()
522 self.removed = set()
523 self.changed = set()
523 self.changed = set()
524 self.copied = []
524 self.copied = []
525
525
526 def _checkknown(self, fname):
526 def _checkknown(self, fname):
527 if self.repo.dirstate[fname] == '?' and self.exists(fname):
527 if self.repo.dirstate[fname] == '?' and self.exists(fname):
528 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
528 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
529
529
530 def setfile(self, fname, data, mode, copysource):
530 def setfile(self, fname, data, mode, copysource):
531 self._checkknown(fname)
531 self._checkknown(fname)
532 super(workingbackend, self).setfile(fname, data, mode, copysource)
532 super(workingbackend, self).setfile(fname, data, mode, copysource)
533 if copysource is not None:
533 if copysource is not None:
534 self.copied.append((copysource, fname))
534 self.copied.append((copysource, fname))
535 self.changed.add(fname)
535 self.changed.add(fname)
536
536
537 def unlink(self, fname):
537 def unlink(self, fname):
538 self._checkknown(fname)
538 self._checkknown(fname)
539 super(workingbackend, self).unlink(fname)
539 super(workingbackend, self).unlink(fname)
540 self.removed.add(fname)
540 self.removed.add(fname)
541 self.changed.add(fname)
541 self.changed.add(fname)
542
542
543 def close(self):
543 def close(self):
544 wctx = self.repo[None]
544 wctx = self.repo[None]
545 changed = set(self.changed)
545 changed = set(self.changed)
546 for src, dst in self.copied:
546 for src, dst in self.copied:
547 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
547 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
548 if self.removed:
548 if self.removed:
549 wctx.forget(sorted(self.removed))
549 wctx.forget(sorted(self.removed))
550 for f in self.removed:
550 for f in self.removed:
551 if f not in self.repo.dirstate:
551 if f not in self.repo.dirstate:
552 # File was deleted and no longer belongs to the
552 # File was deleted and no longer belongs to the
553 # dirstate, it was probably marked added then
553 # dirstate, it was probably marked added then
554 # deleted, and should not be considered by
554 # deleted, and should not be considered by
555 # marktouched().
555 # marktouched().
556 changed.discard(f)
556 changed.discard(f)
557 if changed:
557 if changed:
558 scmutil.marktouched(self.repo, changed, self.similarity)
558 scmutil.marktouched(self.repo, changed, self.similarity)
559 return sorted(self.changed)
559 return sorted(self.changed)
560
560
561 class filestore(object):
561 class filestore(object):
562 def __init__(self, maxsize=None):
562 def __init__(self, maxsize=None):
563 self.opener = None
563 self.opener = None
564 self.files = {}
564 self.files = {}
565 self.created = 0
565 self.created = 0
566 self.maxsize = maxsize
566 self.maxsize = maxsize
567 if self.maxsize is None:
567 if self.maxsize is None:
568 self.maxsize = 4*(2**20)
568 self.maxsize = 4*(2**20)
569 self.size = 0
569 self.size = 0
570 self.data = {}
570 self.data = {}
571
571
572 def setfile(self, fname, data, mode, copied=None):
572 def setfile(self, fname, data, mode, copied=None):
573 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
573 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
574 self.data[fname] = (data, mode, copied)
574 self.data[fname] = (data, mode, copied)
575 self.size += len(data)
575 self.size += len(data)
576 else:
576 else:
577 if self.opener is None:
577 if self.opener is None:
578 root = pycompat.mkdtemp(prefix='hg-patch-')
578 root = pycompat.mkdtemp(prefix='hg-patch-')
579 self.opener = vfsmod.vfs(root)
579 self.opener = vfsmod.vfs(root)
580 # Avoid filename issues with these simple names
580 # Avoid filename issues with these simple names
581 fn = '%d' % self.created
581 fn = '%d' % self.created
582 self.opener.write(fn, data)
582 self.opener.write(fn, data)
583 self.created += 1
583 self.created += 1
584 self.files[fname] = (fn, mode, copied)
584 self.files[fname] = (fn, mode, copied)
585
585
586 def getfile(self, fname):
586 def getfile(self, fname):
587 if fname in self.data:
587 if fname in self.data:
588 return self.data[fname]
588 return self.data[fname]
589 if not self.opener or fname not in self.files:
589 if not self.opener or fname not in self.files:
590 return None, None, None
590 return None, None, None
591 fn, mode, copied = self.files[fname]
591 fn, mode, copied = self.files[fname]
592 return self.opener.read(fn), mode, copied
592 return self.opener.read(fn), mode, copied
593
593
594 def close(self):
594 def close(self):
595 if self.opener:
595 if self.opener:
596 shutil.rmtree(self.opener.base)
596 shutil.rmtree(self.opener.base)
597
597
598 class repobackend(abstractbackend):
598 class repobackend(abstractbackend):
599 def __init__(self, ui, repo, ctx, store):
599 def __init__(self, ui, repo, ctx, store):
600 super(repobackend, self).__init__(ui)
600 super(repobackend, self).__init__(ui)
601 self.repo = repo
601 self.repo = repo
602 self.ctx = ctx
602 self.ctx = ctx
603 self.store = store
603 self.store = store
604 self.changed = set()
604 self.changed = set()
605 self.removed = set()
605 self.removed = set()
606 self.copied = {}
606 self.copied = {}
607
607
608 def _checkknown(self, fname):
608 def _checkknown(self, fname):
609 if fname not in self.ctx:
609 if fname not in self.ctx:
610 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
610 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
611
611
612 def getfile(self, fname):
612 def getfile(self, fname):
613 try:
613 try:
614 fctx = self.ctx[fname]
614 fctx = self.ctx[fname]
615 except error.LookupError:
615 except error.LookupError:
616 return None, None
616 return None, None
617 flags = fctx.flags()
617 flags = fctx.flags()
618 return fctx.data(), ('l' in flags, 'x' in flags)
618 return fctx.data(), ('l' in flags, 'x' in flags)
619
619
620 def setfile(self, fname, data, mode, copysource):
620 def setfile(self, fname, data, mode, copysource):
621 if copysource:
621 if copysource:
622 self._checkknown(copysource)
622 self._checkknown(copysource)
623 if data is None:
623 if data is None:
624 data = self.ctx[fname].data()
624 data = self.ctx[fname].data()
625 self.store.setfile(fname, data, mode, copysource)
625 self.store.setfile(fname, data, mode, copysource)
626 self.changed.add(fname)
626 self.changed.add(fname)
627 if copysource:
627 if copysource:
628 self.copied[fname] = copysource
628 self.copied[fname] = copysource
629
629
630 def unlink(self, fname):
630 def unlink(self, fname):
631 self._checkknown(fname)
631 self._checkknown(fname)
632 self.removed.add(fname)
632 self.removed.add(fname)
633
633
634 def exists(self, fname):
634 def exists(self, fname):
635 return fname in self.ctx
635 return fname in self.ctx
636
636
637 def close(self):
637 def close(self):
638 return self.changed | self.removed
638 return self.changed | self.removed
639
639
640 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
640 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
641 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
641 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
642 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
642 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
643 eolmodes = ['strict', 'crlf', 'lf', 'auto']
643 eolmodes = ['strict', 'crlf', 'lf', 'auto']
644
644
645 class patchfile(object):
645 class patchfile(object):
646 def __init__(self, ui, gp, backend, store, eolmode='strict'):
646 def __init__(self, ui, gp, backend, store, eolmode='strict'):
647 self.fname = gp.path
647 self.fname = gp.path
648 self.eolmode = eolmode
648 self.eolmode = eolmode
649 self.eol = None
649 self.eol = None
650 self.backend = backend
650 self.backend = backend
651 self.ui = ui
651 self.ui = ui
652 self.lines = []
652 self.lines = []
653 self.exists = False
653 self.exists = False
654 self.missing = True
654 self.missing = True
655 self.mode = gp.mode
655 self.mode = gp.mode
656 self.copysource = gp.oldpath
656 self.copysource = gp.oldpath
657 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
657 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
658 self.remove = gp.op == 'DELETE'
658 self.remove = gp.op == 'DELETE'
659 if self.copysource is None:
659 if self.copysource is None:
660 data, mode = backend.getfile(self.fname)
660 data, mode = backend.getfile(self.fname)
661 else:
661 else:
662 data, mode = store.getfile(self.copysource)[:2]
662 data, mode = store.getfile(self.copysource)[:2]
663 if data is not None:
663 if data is not None:
664 self.exists = self.copysource is None or backend.exists(self.fname)
664 self.exists = self.copysource is None or backend.exists(self.fname)
665 self.missing = False
665 self.missing = False
666 if data:
666 if data:
667 self.lines = mdiff.splitnewlines(data)
667 self.lines = mdiff.splitnewlines(data)
668 if self.mode is None:
668 if self.mode is None:
669 self.mode = mode
669 self.mode = mode
670 if self.lines:
670 if self.lines:
671 # Normalize line endings
671 # Normalize line endings
672 if self.lines[0].endswith('\r\n'):
672 if self.lines[0].endswith('\r\n'):
673 self.eol = '\r\n'
673 self.eol = '\r\n'
674 elif self.lines[0].endswith('\n'):
674 elif self.lines[0].endswith('\n'):
675 self.eol = '\n'
675 self.eol = '\n'
676 if eolmode != 'strict':
676 if eolmode != 'strict':
677 nlines = []
677 nlines = []
678 for l in self.lines:
678 for l in self.lines:
679 if l.endswith('\r\n'):
679 if l.endswith('\r\n'):
680 l = l[:-2] + '\n'
680 l = l[:-2] + '\n'
681 nlines.append(l)
681 nlines.append(l)
682 self.lines = nlines
682 self.lines = nlines
683 else:
683 else:
684 if self.create:
684 if self.create:
685 self.missing = False
685 self.missing = False
686 if self.mode is None:
686 if self.mode is None:
687 self.mode = (False, False)
687 self.mode = (False, False)
688 if self.missing:
688 if self.missing:
689 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
689 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
690 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
690 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
691 "current directory)\n"))
691 "current directory)\n"))
692
692
693 self.hash = {}
693 self.hash = {}
694 self.dirty = 0
694 self.dirty = 0
695 self.offset = 0
695 self.offset = 0
696 self.skew = 0
696 self.skew = 0
697 self.rej = []
697 self.rej = []
698 self.fileprinted = False
698 self.fileprinted = False
699 self.printfile(False)
699 self.printfile(False)
700 self.hunks = 0
700 self.hunks = 0
701
701
702 def writelines(self, fname, lines, mode):
702 def writelines(self, fname, lines, mode):
703 if self.eolmode == 'auto':
703 if self.eolmode == 'auto':
704 eol = self.eol
704 eol = self.eol
705 elif self.eolmode == 'crlf':
705 elif self.eolmode == 'crlf':
706 eol = '\r\n'
706 eol = '\r\n'
707 else:
707 else:
708 eol = '\n'
708 eol = '\n'
709
709
710 if self.eolmode != 'strict' and eol and eol != '\n':
710 if self.eolmode != 'strict' and eol and eol != '\n':
711 rawlines = []
711 rawlines = []
712 for l in lines:
712 for l in lines:
713 if l and l.endswith('\n'):
713 if l and l.endswith('\n'):
714 l = l[:-1] + eol
714 l = l[:-1] + eol
715 rawlines.append(l)
715 rawlines.append(l)
716 lines = rawlines
716 lines = rawlines
717
717
718 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
718 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
719
719
720 def printfile(self, warn):
720 def printfile(self, warn):
721 if self.fileprinted:
721 if self.fileprinted:
722 return
722 return
723 if warn or self.ui.verbose:
723 if warn or self.ui.verbose:
724 self.fileprinted = True
724 self.fileprinted = True
725 s = _("patching file %s\n") % self.fname
725 s = _("patching file %s\n") % self.fname
726 if warn:
726 if warn:
727 self.ui.warn(s)
727 self.ui.warn(s)
728 else:
728 else:
729 self.ui.note(s)
729 self.ui.note(s)
730
730
731
731
732 def findlines(self, l, linenum):
732 def findlines(self, l, linenum):
733 # looks through the hash and finds candidate lines. The
733 # looks through the hash and finds candidate lines. The
734 # result is a list of line numbers sorted based on distance
734 # result is a list of line numbers sorted based on distance
735 # from linenum
735 # from linenum
736
736
737 cand = self.hash.get(l, [])
737 cand = self.hash.get(l, [])
738 if len(cand) > 1:
738 if len(cand) > 1:
739 # resort our list of potentials forward then back.
739 # resort our list of potentials forward then back.
740 cand.sort(key=lambda x: abs(x - linenum))
740 cand.sort(key=lambda x: abs(x - linenum))
741 return cand
741 return cand
742
742
743 def write_rej(self):
743 def write_rej(self):
744 # our rejects are a little different from patch(1). This always
744 # our rejects are a little different from patch(1). This always
745 # creates rejects in the same form as the original patch. A file
745 # creates rejects in the same form as the original patch. A file
746 # header is inserted so that you can run the reject through patch again
746 # header is inserted so that you can run the reject through patch again
747 # without having to type the filename.
747 # without having to type the filename.
748 if not self.rej:
748 if not self.rej:
749 return
749 return
750 base = os.path.basename(self.fname)
750 base = os.path.basename(self.fname)
751 lines = ["--- %s\n+++ %s\n" % (base, base)]
751 lines = ["--- %s\n+++ %s\n" % (base, base)]
752 for x in self.rej:
752 for x in self.rej:
753 for l in x.hunk:
753 for l in x.hunk:
754 lines.append(l)
754 lines.append(l)
755 if l[-1:] != '\n':
755 if l[-1:] != '\n':
756 lines.append("\n\\ No newline at end of file\n")
756 lines.append("\n\\ No newline at end of file\n")
757 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
757 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
758
758
759 def apply(self, h):
759 def apply(self, h):
760 if not h.complete():
760 if not h.complete():
761 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
761 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
762 (h.number, h.desc, len(h.a), h.lena, len(h.b),
762 (h.number, h.desc, len(h.a), h.lena, len(h.b),
763 h.lenb))
763 h.lenb))
764
764
765 self.hunks += 1
765 self.hunks += 1
766
766
767 if self.missing:
767 if self.missing:
768 self.rej.append(h)
768 self.rej.append(h)
769 return -1
769 return -1
770
770
771 if self.exists and self.create:
771 if self.exists and self.create:
772 if self.copysource:
772 if self.copysource:
773 self.ui.warn(_("cannot create %s: destination already "
773 self.ui.warn(_("cannot create %s: destination already "
774 "exists\n") % self.fname)
774 "exists\n") % self.fname)
775 else:
775 else:
776 self.ui.warn(_("file %s already exists\n") % self.fname)
776 self.ui.warn(_("file %s already exists\n") % self.fname)
777 self.rej.append(h)
777 self.rej.append(h)
778 return -1
778 return -1
779
779
780 if isinstance(h, binhunk):
780 if isinstance(h, binhunk):
781 if self.remove:
781 if self.remove:
782 self.backend.unlink(self.fname)
782 self.backend.unlink(self.fname)
783 else:
783 else:
784 l = h.new(self.lines)
784 l = h.new(self.lines)
785 self.lines[:] = l
785 self.lines[:] = l
786 self.offset += len(l)
786 self.offset += len(l)
787 self.dirty = True
787 self.dirty = True
788 return 0
788 return 0
789
789
790 horig = h
790 horig = h
791 if (self.eolmode in ('crlf', 'lf')
791 if (self.eolmode in ('crlf', 'lf')
792 or self.eolmode == 'auto' and self.eol):
792 or self.eolmode == 'auto' and self.eol):
793 # If new eols are going to be normalized, then normalize
793 # If new eols are going to be normalized, then normalize
794 # hunk data before patching. Otherwise, preserve input
794 # hunk data before patching. Otherwise, preserve input
795 # line-endings.
795 # line-endings.
796 h = h.getnormalized()
796 h = h.getnormalized()
797
797
798 # fast case first, no offsets, no fuzz
798 # fast case first, no offsets, no fuzz
799 old, oldstart, new, newstart = h.fuzzit(0, False)
799 old, oldstart, new, newstart = h.fuzzit(0, False)
800 oldstart += self.offset
800 oldstart += self.offset
801 orig_start = oldstart
801 orig_start = oldstart
802 # if there's skew we want to emit the "(offset %d lines)" even
802 # if there's skew we want to emit the "(offset %d lines)" even
803 # when the hunk cleanly applies at start + skew, so skip the
803 # when the hunk cleanly applies at start + skew, so skip the
804 # fast case code
804 # fast case code
805 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
805 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
806 if self.remove:
806 if self.remove:
807 self.backend.unlink(self.fname)
807 self.backend.unlink(self.fname)
808 else:
808 else:
809 self.lines[oldstart:oldstart + len(old)] = new
809 self.lines[oldstart:oldstart + len(old)] = new
810 self.offset += len(new) - len(old)
810 self.offset += len(new) - len(old)
811 self.dirty = True
811 self.dirty = True
812 return 0
812 return 0
813
813
814 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
814 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
815 self.hash = {}
815 self.hash = {}
816 for x, s in enumerate(self.lines):
816 for x, s in enumerate(self.lines):
817 self.hash.setdefault(s, []).append(x)
817 self.hash.setdefault(s, []).append(x)
818
818
819 for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
819 for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
820 for toponly in [True, False]:
820 for toponly in [True, False]:
821 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
821 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
822 oldstart = oldstart + self.offset + self.skew
822 oldstart = oldstart + self.offset + self.skew
823 oldstart = min(oldstart, len(self.lines))
823 oldstart = min(oldstart, len(self.lines))
824 if old:
824 if old:
825 cand = self.findlines(old[0][1:], oldstart)
825 cand = self.findlines(old[0][1:], oldstart)
826 else:
826 else:
827 # Only adding lines with no or fuzzed context, just
827 # Only adding lines with no or fuzzed context, just
828 # take the skew in account
828 # take the skew in account
829 cand = [oldstart]
829 cand = [oldstart]
830
830
831 for l in cand:
831 for l in cand:
832 if not old or diffhelper.testhunk(old, self.lines, l):
832 if not old or diffhelper.testhunk(old, self.lines, l):
833 self.lines[l : l + len(old)] = new
833 self.lines[l : l + len(old)] = new
834 self.offset += len(new) - len(old)
834 self.offset += len(new) - len(old)
835 self.skew = l - orig_start
835 self.skew = l - orig_start
836 self.dirty = True
836 self.dirty = True
837 offset = l - orig_start - fuzzlen
837 offset = l - orig_start - fuzzlen
838 if fuzzlen:
838 if fuzzlen:
839 msg = _("Hunk #%d succeeded at %d "
839 msg = _("Hunk #%d succeeded at %d "
840 "with fuzz %d "
840 "with fuzz %d "
841 "(offset %d lines).\n")
841 "(offset %d lines).\n")
842 self.printfile(True)
842 self.printfile(True)
843 self.ui.warn(msg %
843 self.ui.warn(msg %
844 (h.number, l + 1, fuzzlen, offset))
844 (h.number, l + 1, fuzzlen, offset))
845 else:
845 else:
846 msg = _("Hunk #%d succeeded at %d "
846 msg = _("Hunk #%d succeeded at %d "
847 "(offset %d lines).\n")
847 "(offset %d lines).\n")
848 self.ui.note(msg % (h.number, l + 1, offset))
848 self.ui.note(msg % (h.number, l + 1, offset))
849 return fuzzlen
849 return fuzzlen
850 self.printfile(True)
850 self.printfile(True)
851 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
851 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
852 self.rej.append(horig)
852 self.rej.append(horig)
853 return -1
853 return -1
854
854
855 def close(self):
855 def close(self):
856 if self.dirty:
856 if self.dirty:
857 self.writelines(self.fname, self.lines, self.mode)
857 self.writelines(self.fname, self.lines, self.mode)
858 self.write_rej()
858 self.write_rej()
859 return len(self.rej)
859 return len(self.rej)
860
860
class header(object):
    """patch header

    Wraps the raw header lines of one file's diff and the hunks that
    follow them.
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # git emits an 'index ' line for binary (all-or-nothing) patches
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a short human-oriented summary of this header to fp."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                changed = sum(max(hk.added, hk.removed) for hk in self.hunks)
                fp.write(_('%d hunks, %d lines changed\n')
                         % (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        # emit the raw header lines unchanged
        fp.write(''.join(self.header))

    def allhunks(self):
        # True when the hunks cannot be cherry-picked individually
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        firstline = self.header[0]
        m = self.diffgit_re.match(firstline)
        if not m:
            # non-git diff: only one file name is available
            return self.diff_re.match(firstline).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        # for copies/renames the destination name is the interesting one
        return self.files()[-1]

    def __repr__(self):
        names = ' '.join(repr(f) for f in self.files())
        return '<header %s>' % names

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
932
932
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # limit context to maxcontext lines, dropping from the front
            # (reverse=True) or the back; report how many lines were cut
            if maxcontext is not None:
                extra = len(lines) - maxcontext
                if extra > 0:
                    if reverse:
                        return extra, lines[extra:]
                    return extra, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # trimming leading context shifts where the hunk starts
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        key = (tuple(self.hunk),
               tuple(self.header.files()),
               self.fromline,
               self.proc)
        return hash(key)

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line.startswith('+'))
        rem = sum(1 for line in hunk if line.startswith('-'))
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        swapped = ['%s%s' % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, swapped, self.after)

    def write(self, fp):
        """Emit this hunk in unified-diff form to fp."""
        delta = len(self.before) + len(self.after)
        # the no-eol marker is not a real context line
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        atline = '@@ -%d,%d +%d,%d @@%s\n' % (
            self.fromline, fromlen, self.toline, tolen,
            self.proc and (' ' + self.proc))
        fp.write(atline)
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1011
1011
def getmessages():
    """Return the prompt and help strings used by interactive filtering.

    The result maps 'multiple'/'single'/'help' to per-operation
    ('apply'/'discard'/'record') message templates.
    """
    multiple = {
        'apply': _("apply change %d/%d to '%s'?"),
        'discard': _("discard change %d/%d to '%s'?"),
        'record': _("record change %d/%d to '%s'?"),
    }
    single = {
        'apply': _("apply this change to '%s'?"),
        'discard': _("discard this change to '%s'?"),
        'record': _("record this change to '%s'?"),
    }
    # the '$$'-separated strings feed ui.promptchoice/extractchoices
    helpmsgs = {
        'apply': _('[Ynesfdaq?]'
                   '$$ &Yes, apply this change'
                   '$$ &No, skip this change'
                   '$$ &Edit this change manually'
                   '$$ &Skip remaining changes to this file'
                   '$$ Apply remaining changes to this &file'
                   '$$ &Done, skip remaining changes and files'
                   '$$ Apply &all changes to all remaining files'
                   '$$ &Quit, applying no changes'
                   '$$ &? (display help)'),
        'discard': _('[Ynesfdaq?]'
                     '$$ &Yes, discard this change'
                     '$$ &No, skip this change'
                     '$$ &Edit this change manually'
                     '$$ &Skip remaining changes to this file'
                     '$$ Discard remaining changes to this &file'
                     '$$ &Done, skip remaining changes and files'
                     '$$ Discard &all changes to all remaining files'
                     '$$ &Quit, discarding no changes'
                     '$$ &? (display help)'),
        'record': _('[Ynesfdaq?]'
                    '$$ &Yes, record this change'
                    '$$ &No, skip this change'
                    '$$ &Edit this change manually'
                    '$$ &Skip remaining changes to this file'
                    '$$ Record remaining changes to this &file'
                    '$$ &Done, skip remaining changes and files'
                    '$$ Record &all changes to all remaining files'
                    '$$ &Quit, recording no changes'
                    '$$ &? (display help)'),
    }
    return {
        'multiple': multiple,
        'single': single,
        'help': helpmsgs,
    }
1057
1057
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    headers is a list of parsed file headers (with their hunks attached);
    operation selects the wording of the prompts ('record' by default).
    Returns a pair (chunks, opts-dict); chunks contains only the headers
    and hunks the user accepted.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # a previous 'file'/'all' answer short-circuits further prompting
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                # whole-file prompts pass chunk=None; nothing to edit then
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff")
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    # reparse the edited text into fresh header/hunk objects
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    # pos is advanced past each file's hunks up front so that
    # idx = pos - len(h.hunks) + i yields a 1-based global hunk index
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        # identical headers are only offered once
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # all-or-nothing file (binary, deletion, ...): take every hunk
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                # fixoffset compensates for previously skipped hunks that
                # would have shifted later line numbers
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk; substitute the reparsed hunks
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # keep only entries where something beyond the bare header was taken
    # (or the header itself is special, e.g. a file deletion)
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
1209 class hunk(object):
1209 class hunk(object):
1210 def __init__(self, desc, num, lr, context):
1210 def __init__(self, desc, num, lr, context):
1211 self.number = num
1211 self.number = num
1212 self.desc = desc
1212 self.desc = desc
1213 self.hunk = [desc]
1213 self.hunk = [desc]
1214 self.a = []
1214 self.a = []
1215 self.b = []
1215 self.b = []
1216 self.starta = self.lena = None
1216 self.starta = self.lena = None
1217 self.startb = self.lenb = None
1217 self.startb = self.lenb = None
1218 if lr is not None:
1218 if lr is not None:
1219 if context:
1219 if context:
1220 self.read_context_hunk(lr)
1220 self.read_context_hunk(lr)
1221 else:
1221 else:
1222 self.read_unified_hunk(lr)
1222 self.read_unified_hunk(lr)
1223
1223
1224 def getnormalized(self):
1224 def getnormalized(self):
1225 """Return a copy with line endings normalized to LF."""
1225 """Return a copy with line endings normalized to LF."""
1226
1226
1227 def normalize(lines):
1227 def normalize(lines):
1228 nlines = []
1228 nlines = []
1229 for line in lines:
1229 for line in lines:
1230 if line.endswith('\r\n'):
1230 if line.endswith('\r\n'):
1231 line = line[:-2] + '\n'
1231 line = line[:-2] + '\n'
1232 nlines.append(line)
1232 nlines.append(line)
1233 return nlines
1233 return nlines
1234
1234
1235 # Dummy object, it is rebuilt manually
1235 # Dummy object, it is rebuilt manually
1236 nh = hunk(self.desc, self.number, None, None)
1236 nh = hunk(self.desc, self.number, None, None)
1237 nh.number = self.number
1237 nh.number = self.number
1238 nh.desc = self.desc
1238 nh.desc = self.desc
1239 nh.hunk = self.hunk
1239 nh.hunk = self.hunk
1240 nh.a = normalize(self.a)
1240 nh.a = normalize(self.a)
1241 nh.b = normalize(self.b)
1241 nh.b = normalize(self.b)
1242 nh.starta = self.starta
1242 nh.starta = self.starta
1243 nh.startb = self.startb
1243 nh.startb = self.startb
1244 nh.lena = self.lena
1244 nh.lena = self.lena
1245 nh.lenb = self.lenb
1245 nh.lenb = self.lenb
1246 return nh
1246 return nh
1247
1247
1248 def read_unified_hunk(self, lr):
1248 def read_unified_hunk(self, lr):
1249 m = unidesc.match(self.desc)
1249 m = unidesc.match(self.desc)
1250 if not m:
1250 if not m:
1251 raise PatchError(_("bad hunk #%d") % self.number)
1251 raise PatchError(_("bad hunk #%d") % self.number)
1252 self.starta, self.lena, self.startb, self.lenb = m.groups()
1252 self.starta, self.lena, self.startb, self.lenb = m.groups()
1253 if self.lena is None:
1253 if self.lena is None:
1254 self.lena = 1
1254 self.lena = 1
1255 else:
1255 else:
1256 self.lena = int(self.lena)
1256 self.lena = int(self.lena)
1257 if self.lenb is None:
1257 if self.lenb is None:
1258 self.lenb = 1
1258 self.lenb = 1
1259 else:
1259 else:
1260 self.lenb = int(self.lenb)
1260 self.lenb = int(self.lenb)
1261 self.starta = int(self.starta)
1261 self.starta = int(self.starta)
1262 self.startb = int(self.startb)
1262 self.startb = int(self.startb)
1263 try:
1263 try:
1264 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1264 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1265 self.a, self.b)
1265 self.a, self.b)
1266 except error.ParseError as e:
1266 except error.ParseError as e:
1267 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1267 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1268 # if we hit eof before finishing out the hunk, the last line will
1268 # if we hit eof before finishing out the hunk, the last line will
1269 # be zero length. Lets try to fix it up.
1269 # be zero length. Lets try to fix it up.
1270 while len(self.hunk[-1]) == 0:
1270 while len(self.hunk[-1]) == 0:
1271 del self.hunk[-1]
1271 del self.hunk[-1]
1272 del self.a[-1]
1272 del self.a[-1]
1273 del self.b[-1]
1273 del self.b[-1]
1274 self.lena -= 1
1274 self.lena -= 1
1275 self.lenb -= 1
1275 self.lenb -= 1
1276 self._fixnewline(lr)
1276 self._fixnewline(lr)
1277
1277
    def read_context_hunk(self, lr):
        """Parse a context-format hunk body from lr.

        Fills self.a (old side), self.b (new side) and self.hunk
        (interleaved unified form), then rewrites self.desc as the
        equivalent unified "@@ -start,len +start,len @@" line.
        """
        self.desc = lr.readline()
        # contextdesc is a module-level regex defined elsewhere in this file
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # first pass: the old side ('- '/'! ' removals plus context)
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # '\ No newline at end of file': strip the newline we stored
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # second pass: the new side; hunki walks self.hunk so additions are
        # merged in at the right position relative to removals/context
        hunki = 1
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance hunki past matching/removed lines; insert additions
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)
1380
1380
1381 def _fixnewline(self, lr):
1381 def _fixnewline(self, lr):
1382 l = lr.readline()
1382 l = lr.readline()
1383 if l.startswith(br'\ '):
1383 if l.startswith(br'\ '):
1384 diffhelper.fixnewline(self.hunk, self.a, self.b)
1384 diffhelper.fixnewline(self.hunk, self.a, self.b)
1385 else:
1385 else:
1386 lr.push(l)
1386 lr.push(l)
1387
1387
1388 def complete(self):
1388 def complete(self):
1389 return len(self.a) == self.lena and len(self.b) == self.lenb
1389 return len(self.a) == self.lena and len(self.b) == self.lenb
1390
1390
    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        #
        # Returns (trimmed old, trimmed new, number of lines trimmed from
        # the top). With toponly, only leading context is trimmed.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            # count leading context lines (those starting with a space)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                # count trailing context lines, scanning upward from the end
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            # never trim more than 'fuzz' lines from either end
            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0
1417
1417
1418 def fuzzit(self, fuzz, toponly):
1418 def fuzzit(self, fuzz, toponly):
1419 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1419 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1420 oldstart = self.starta + top
1420 oldstart = self.starta + top
1421 newstart = self.startb + top
1421 newstart = self.startb + top
1422 # zero length hunk ranges already have their start decremented
1422 # zero length hunk ranges already have their start decremented
1423 if self.lena and oldstart > 0:
1423 if self.lena and oldstart > 0:
1424 oldstart -= 1
1424 oldstart -= 1
1425 if self.lenb and newstart > 0:
1425 if self.lenb and newstart > 0:
1426 newstart -= 1
1426 newstart -= 1
1427 return old, oldstart, new, newstart
1427 return old, oldstart, new, newstart
1428
1428
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against the previous content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # _read() either raises or sets self.text, so this reports whether
        # the hunk was fully parsed
        return self.text is not None

    def new(self, lines):
        if self.delta:
            # 'lines' carry the base content the delta is applied to
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one line, record it verbatim in the hunk, and return it
            # stripped of its line terminator
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        # each data line is git base85: its first character encodes the
        # decoded byte count (A-Z -> 1..26, a-z -> 27..52) and the rest is
        # base85 data; a line shorter than two characters ends the hunk
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        # the concatenated chunks are zlib-compressed; the advertised size
        # detects truncated or corrupt input
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1483
1483
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' diff header line.

    The name runs up to the first tab or, failing that, the first space
    separating it from trailing metadata (e.g. a timestamp). When
    neither separator is present the whole remainder is the name.
    """
    # drop the 4-character '--- '/'+++ ' prefix and the line terminator
    rest = str[4:].rstrip('\r\n')
    # a tab separator takes precedence over a space
    for sep in ('\t', ' '):
        cut = rest.find(sep)
        if cut >= 0:
            return rest[:cut]
    return rest
1493
1493
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
    c
    1
    2
    @@ -2,6 +1,6 @@
    c
    1
    2
    - 3
    +4
    5
    d
    @@ -6,3 +5,2 @@
    5
    d
    -lastline

    '''

    newhunks = []
    for c in hunks:
        # headers pass through unchanged; only items that know how to
        # reverse themselves (hunks) are replaced by their reversal
        if util.safehasattr(c, 'reversehunk'):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks
1556
1556
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # a new @@ range: flush any pending hunk, then reset the
            # line counters from the range header
            self.addcontext([])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context lines terminate the current hunk, if any; the
            # pending change lines plus surrounding context become a
            # recordhunk attached to the current header
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen just before these change lines becomes the
            # leading context of the new hunk
            if self.context:
                self.before = self.context
                self.context = []
            if self.hunk:
                self.addcontext([])
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the trailing hunk before handing back the headers
            self.addcontext([])
            return self.headers

        # maps (current state -> event type -> handler); a missing entry
        # means the input is malformed
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    # drive the state machine from the event stream of scanpatch()
    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1683
1683
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining:
        # advance past the next path separator
        nextslash = path.find('/', pos)
        if nextslash == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos = nextslash + 1
        # consume '//' in the path
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1721
1721
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta (target name plus ADD/DELETE op) for a plain
    (non-git) patch hunk.

    afile_orig/bfile_orig are the raw '---'/'+++' names; strip and prefix
    are applied through pathtransform(). backend.exists() decides which of
    the two candidate names actually exists. Raises PatchError when no
    usable name can be derived.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a creation/removal is signalled by /dev/null plus a 0,0 range on
    # the corresponding side
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back to whichever side is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1776
1776
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # first non-matching line is pushed back for the caller
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            # gather everything up to the '---'/'+++' pair (or the next
            # 'diff' line) as the file header
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line.startswith(' '):
            # a run of unchanged lines (plus no-eol '\' markers)
            cs = (' ', '\\')
            yield 'context', scanwhile(line, lambda l: l.startswith(cs))
        elif line.startswith(('-', '+')):
            # a run of added/removed lines forms one hunk event
            cs = ('-', '+', '\\')
            yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1824
1824
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # the underlying file is not seekable (e.g. a pipe): buffer the
        # remaining input so we can rewind after the metadata scan
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch from where it left off
    fp.seek(pos)
    return gitpatches
1850
1850
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until we know whether this is a context diff (True)
    # or a unified diff (False)
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file itself
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries for files preceding the one this
            # header is about
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git patches
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1945
1945
1946 def applybindelta(binchunk, data):
1946 def applybindelta(binchunk, data):
1947 """Apply a binary delta hunk
1947 """Apply a binary delta hunk
1948 The algorithm used is the algorithm from git's patch-delta.c
1948 The algorithm used is the algorithm from git's patch-delta.c
1949 """
1949 """
1950 def deltahead(binchunk):
1950 def deltahead(binchunk):
1951 i = 0
1951 i = 0
1952 for c in pycompat.bytestr(binchunk):
1952 for c in pycompat.bytestr(binchunk):
1953 i += 1
1953 i += 1
1954 if not (ord(c) & 0x80):
1954 if not (ord(c) & 0x80):
1955 return i
1955 return i
1956 return i
1956 return i
1957 out = ""
1957 out = ""
1958 s = deltahead(binchunk)
1958 s = deltahead(binchunk)
1959 binchunk = binchunk[s:]
1959 binchunk = binchunk[s:]
1960 s = deltahead(binchunk)
1960 s = deltahead(binchunk)
1961 binchunk = binchunk[s:]
1961 binchunk = binchunk[s:]
1962 i = 0
1962 i = 0
1963 while i < len(binchunk):
1963 while i < len(binchunk):
1964 cmd = ord(binchunk[i:i + 1])
1964 cmd = ord(binchunk[i:i + 1])
1965 i += 1
1965 i += 1
1966 if (cmd & 0x80):
1966 if (cmd & 0x80):
1967 offset = 0
1967 offset = 0
1968 size = 0
1968 size = 0
1969 if (cmd & 0x01):
1969 if (cmd & 0x01):
1970 offset = ord(binchunk[i:i + 1])
1970 offset = ord(binchunk[i:i + 1])
1971 i += 1
1971 i += 1
1972 if (cmd & 0x02):
1972 if (cmd & 0x02):
1973 offset |= ord(binchunk[i:i + 1]) << 8
1973 offset |= ord(binchunk[i:i + 1]) << 8
1974 i += 1
1974 i += 1
1975 if (cmd & 0x04):
1975 if (cmd & 0x04):
1976 offset |= ord(binchunk[i:i + 1]) << 16
1976 offset |= ord(binchunk[i:i + 1]) << 16
1977 i += 1
1977 i += 1
1978 if (cmd & 0x08):
1978 if (cmd & 0x08):
1979 offset |= ord(binchunk[i:i + 1]) << 24
1979 offset |= ord(binchunk[i:i + 1]) << 24
1980 i += 1
1980 i += 1
1981 if (cmd & 0x10):
1981 if (cmd & 0x10):
1982 size = ord(binchunk[i:i + 1])
1982 size = ord(binchunk[i:i + 1])
1983 i += 1
1983 i += 1
1984 if (cmd & 0x20):
1984 if (cmd & 0x20):
1985 size |= ord(binchunk[i:i + 1]) << 8
1985 size |= ord(binchunk[i:i + 1]) << 8
1986 i += 1
1986 i += 1
1987 if (cmd & 0x40):
1987 if (cmd & 0x40):
1988 size |= ord(binchunk[i:i + 1]) << 16
1988 size |= ord(binchunk[i:i + 1]) << 16
1989 i += 1
1989 i += 1
1990 if size == 0:
1990 if size == 0:
1991 size = 0x10000
1991 size = 0x10000
1992 offset_end = offset + size
1992 offset_end = offset + size
1993 out += data[offset:offset_end]
1993 out += data[offset:offset_end]
1994 elif cmd != 0:
1994 elif cmd != 0:
1995 offset_end = i + cmd
1995 offset_end = i + cmd
1996 out += binchunk[i:offset_end]
1996 out += binchunk[i:offset_end]
1997 i += cmd
1997 i += cmd
1998 else:
1998 else:
1999 raise PatchError(_('unexpected delta opcode 0'))
1999 raise PatchError(_('unexpected delta opcode 0'))
2000 return out
2000 return out
2001
2001
2002 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2002 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2003 """Reads a patch from fp and tries to apply it.
2003 """Reads a patch from fp and tries to apply it.
2004
2004
2005 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2005 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2006 there was any fuzz.
2006 there was any fuzz.
2007
2007
2008 If 'eolmode' is 'strict', the patch content and patched file are
2008 If 'eolmode' is 'strict', the patch content and patched file are
2009 read in binary mode. Otherwise, line endings are ignored when
2009 read in binary mode. Otherwise, line endings are ignored when
2010 patching then normalized according to 'eolmode'.
2010 patching then normalized according to 'eolmode'.
2011 """
2011 """
2012 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2012 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2013 prefix=prefix, eolmode=eolmode)
2013 prefix=prefix, eolmode=eolmode)
2014
2014
2015 def _canonprefix(repo, prefix):
2015 def _canonprefix(repo, prefix):
2016 if prefix:
2016 if prefix:
2017 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2017 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2018 if prefix != '':
2018 if prefix != '':
2019 prefix += '/'
2019 prefix += '/'
2020 return prefix
2020 return prefix
2021
2021
2022 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2022 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2023 eolmode='strict'):
2023 eolmode='strict'):
2024 prefix = _canonprefix(backend.repo, prefix)
2024 prefix = _canonprefix(backend.repo, prefix)
2025 def pstrip(p):
2025 def pstrip(p):
2026 return pathtransform(p, strip - 1, prefix)[1]
2026 return pathtransform(p, strip - 1, prefix)[1]
2027
2027
2028 rejects = 0
2028 rejects = 0
2029 err = 0
2029 err = 0
2030 current_file = None
2030 current_file = None
2031
2031
2032 for state, values in iterhunks(fp):
2032 for state, values in iterhunks(fp):
2033 if state == 'hunk':
2033 if state == 'hunk':
2034 if not current_file:
2034 if not current_file:
2035 continue
2035 continue
2036 ret = current_file.apply(values)
2036 ret = current_file.apply(values)
2037 if ret > 0:
2037 if ret > 0:
2038 err = 1
2038 err = 1
2039 elif state == 'file':
2039 elif state == 'file':
2040 if current_file:
2040 if current_file:
2041 rejects += current_file.close()
2041 rejects += current_file.close()
2042 current_file = None
2042 current_file = None
2043 afile, bfile, first_hunk, gp = values
2043 afile, bfile, first_hunk, gp = values
2044 if gp:
2044 if gp:
2045 gp.path = pstrip(gp.path)
2045 gp.path = pstrip(gp.path)
2046 if gp.oldpath:
2046 if gp.oldpath:
2047 gp.oldpath = pstrip(gp.oldpath)
2047 gp.oldpath = pstrip(gp.oldpath)
2048 else:
2048 else:
2049 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2049 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2050 prefix)
2050 prefix)
2051 if gp.op == 'RENAME':
2051 if gp.op == 'RENAME':
2052 backend.unlink(gp.oldpath)
2052 backend.unlink(gp.oldpath)
2053 if not first_hunk:
2053 if not first_hunk:
2054 if gp.op == 'DELETE':
2054 if gp.op == 'DELETE':
2055 backend.unlink(gp.path)
2055 backend.unlink(gp.path)
2056 continue
2056 continue
2057 data, mode = None, None
2057 data, mode = None, None
2058 if gp.op in ('RENAME', 'COPY'):
2058 if gp.op in ('RENAME', 'COPY'):
2059 data, mode = store.getfile(gp.oldpath)[:2]
2059 data, mode = store.getfile(gp.oldpath)[:2]
2060 if data is None:
2060 if data is None:
2061 # This means that the old path does not exist
2061 # This means that the old path does not exist
2062 raise PatchError(_("source file '%s' does not exist")
2062 raise PatchError(_("source file '%s' does not exist")
2063 % gp.oldpath)
2063 % gp.oldpath)
2064 if gp.mode:
2064 if gp.mode:
2065 mode = gp.mode
2065 mode = gp.mode
2066 if gp.op == 'ADD':
2066 if gp.op == 'ADD':
2067 # Added files without content have no hunk and
2067 # Added files without content have no hunk and
2068 # must be created
2068 # must be created
2069 data = ''
2069 data = ''
2070 if data or mode:
2070 if data or mode:
2071 if (gp.op in ('ADD', 'RENAME', 'COPY')
2071 if (gp.op in ('ADD', 'RENAME', 'COPY')
2072 and backend.exists(gp.path)):
2072 and backend.exists(gp.path)):
2073 raise PatchError(_("cannot create %s: destination "
2073 raise PatchError(_("cannot create %s: destination "
2074 "already exists") % gp.path)
2074 "already exists") % gp.path)
2075 backend.setfile(gp.path, data, mode, gp.oldpath)
2075 backend.setfile(gp.path, data, mode, gp.oldpath)
2076 continue
2076 continue
2077 try:
2077 try:
2078 current_file = patcher(ui, gp, backend, store,
2078 current_file = patcher(ui, gp, backend, store,
2079 eolmode=eolmode)
2079 eolmode=eolmode)
2080 except PatchError as inst:
2080 except PatchError as inst:
2081 ui.warn(str(inst) + '\n')
2081 ui.warn(str(inst) + '\n')
2082 current_file = None
2082 current_file = None
2083 rejects += 1
2083 rejects += 1
2084 continue
2084 continue
2085 elif state == 'git':
2085 elif state == 'git':
2086 for gp in values:
2086 for gp in values:
2087 path = pstrip(gp.oldpath)
2087 path = pstrip(gp.oldpath)
2088 data, mode = backend.getfile(path)
2088 data, mode = backend.getfile(path)
2089 if data is None:
2089 if data is None:
2090 # The error ignored here will trigger a getfile()
2090 # The error ignored here will trigger a getfile()
2091 # error in a place more appropriate for error
2091 # error in a place more appropriate for error
2092 # handling, and will not interrupt the patching
2092 # handling, and will not interrupt the patching
2093 # process.
2093 # process.
2094 pass
2094 pass
2095 else:
2095 else:
2096 store.setfile(path, data, mode)
2096 store.setfile(path, data, mode)
2097 else:
2097 else:
2098 raise error.Abort(_('unsupported parser state: %s') % state)
2098 raise error.Abort(_('unsupported parser state: %s') % state)
2099
2099
2100 if current_file:
2100 if current_file:
2101 rejects += current_file.close()
2101 rejects += current_file.close()
2102
2102
2103 if rejects:
2103 if rejects:
2104 return -1
2104 return -1
2105 return err
2105 return err
2106
2106
2107 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2107 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2108 similarity):
2108 similarity):
2109 """use <patcher> to apply <patchname> to the working directory.
2109 """use <patcher> to apply <patchname> to the working directory.
2110 returns whether patch was applied with fuzz factor."""
2110 returns whether patch was applied with fuzz factor."""
2111
2111
2112 fuzz = False
2112 fuzz = False
2113 args = []
2113 args = []
2114 cwd = repo.root
2114 cwd = repo.root
2115 if cwd:
2115 if cwd:
2116 args.append('-d %s' % procutil.shellquote(cwd))
2116 args.append('-d %s' % procutil.shellquote(cwd))
2117 cmd = ('%s %s -p%d < %s'
2117 cmd = ('%s %s -p%d < %s'
2118 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2118 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2119 ui.debug('Using external patch tool: %s\n' % cmd)
2119 ui.debug('Using external patch tool: %s\n' % cmd)
2120 fp = procutil.popen(cmd, 'rb')
2120 fp = procutil.popen(cmd, 'rb')
2121 try:
2121 try:
2122 for line in util.iterfile(fp):
2122 for line in util.iterfile(fp):
2123 line = line.rstrip()
2123 line = line.rstrip()
2124 ui.note(line + '\n')
2124 ui.note(line + '\n')
2125 if line.startswith('patching file '):
2125 if line.startswith('patching file '):
2126 pf = util.parsepatchoutput(line)
2126 pf = util.parsepatchoutput(line)
2127 printed_file = False
2127 printed_file = False
2128 files.add(pf)
2128 files.add(pf)
2129 elif line.find('with fuzz') >= 0:
2129 elif line.find('with fuzz') >= 0:
2130 fuzz = True
2130 fuzz = True
2131 if not printed_file:
2131 if not printed_file:
2132 ui.warn(pf + '\n')
2132 ui.warn(pf + '\n')
2133 printed_file = True
2133 printed_file = True
2134 ui.warn(line + '\n')
2134 ui.warn(line + '\n')
2135 elif line.find('saving rejects to file') >= 0:
2135 elif line.find('saving rejects to file') >= 0:
2136 ui.warn(line + '\n')
2136 ui.warn(line + '\n')
2137 elif line.find('FAILED') >= 0:
2137 elif line.find('FAILED') >= 0:
2138 if not printed_file:
2138 if not printed_file:
2139 ui.warn(pf + '\n')
2139 ui.warn(pf + '\n')
2140 printed_file = True
2140 printed_file = True
2141 ui.warn(line + '\n')
2141 ui.warn(line + '\n')
2142 finally:
2142 finally:
2143 if files:
2143 if files:
2144 scmutil.marktouched(repo, files, similarity)
2144 scmutil.marktouched(repo, files, similarity)
2145 code = fp.close()
2145 code = fp.close()
2146 if code:
2146 if code:
2147 raise PatchError(_("patch command failed: %s") %
2147 raise PatchError(_("patch command failed: %s") %
2148 procutil.explainexit(code))
2148 procutil.explainexit(code))
2149 return fuzz
2149 return fuzz
2150
2150
2151 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2151 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2152 eolmode='strict'):
2152 eolmode='strict'):
2153 if files is None:
2153 if files is None:
2154 files = set()
2154 files = set()
2155 if eolmode is None:
2155 if eolmode is None:
2156 eolmode = ui.config('patch', 'eol')
2156 eolmode = ui.config('patch', 'eol')
2157 if eolmode.lower() not in eolmodes:
2157 if eolmode.lower() not in eolmodes:
2158 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2158 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2159 eolmode = eolmode.lower()
2159 eolmode = eolmode.lower()
2160
2160
2161 store = filestore()
2161 store = filestore()
2162 try:
2162 try:
2163 fp = open(patchobj, 'rb')
2163 fp = open(patchobj, 'rb')
2164 except TypeError:
2164 except TypeError:
2165 fp = patchobj
2165 fp = patchobj
2166 try:
2166 try:
2167 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2167 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2168 eolmode=eolmode)
2168 eolmode=eolmode)
2169 finally:
2169 finally:
2170 if fp != patchobj:
2170 if fp != patchobj:
2171 fp.close()
2171 fp.close()
2172 files.update(backend.close())
2172 files.update(backend.close())
2173 store.close()
2173 store.close()
2174 if ret < 0:
2174 if ret < 0:
2175 raise PatchError(_('patch failed to apply'))
2175 raise PatchError(_('patch failed to apply'))
2176 return ret > 0
2176 return ret > 0
2177
2177
2178 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2178 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2179 eolmode='strict', similarity=0):
2179 eolmode='strict', similarity=0):
2180 """use builtin patch to apply <patchobj> to the working directory.
2180 """use builtin patch to apply <patchobj> to the working directory.
2181 returns whether patch was applied with fuzz factor."""
2181 returns whether patch was applied with fuzz factor."""
2182 backend = workingbackend(ui, repo, similarity)
2182 backend = workingbackend(ui, repo, similarity)
2183 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2183 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2184
2184
2185 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2185 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2186 eolmode='strict'):
2186 eolmode='strict'):
2187 backend = repobackend(ui, repo, ctx, store)
2187 backend = repobackend(ui, repo, ctx, store)
2188 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2188 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2189
2189
2190 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2190 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2191 similarity=0):
2191 similarity=0):
2192 """Apply <patchname> to the working directory.
2192 """Apply <patchname> to the working directory.
2193
2193
2194 'eolmode' specifies how end of lines should be handled. It can be:
2194 'eolmode' specifies how end of lines should be handled. It can be:
2195 - 'strict': inputs are read in binary mode, EOLs are preserved
2195 - 'strict': inputs are read in binary mode, EOLs are preserved
2196 - 'crlf': EOLs are ignored when patching and reset to CRLF
2196 - 'crlf': EOLs are ignored when patching and reset to CRLF
2197 - 'lf': EOLs are ignored when patching and reset to LF
2197 - 'lf': EOLs are ignored when patching and reset to LF
2198 - None: get it from user settings, default to 'strict'
2198 - None: get it from user settings, default to 'strict'
2199 'eolmode' is ignored when using an external patcher program.
2199 'eolmode' is ignored when using an external patcher program.
2200
2200
2201 Returns whether patch was applied with fuzz factor.
2201 Returns whether patch was applied with fuzz factor.
2202 """
2202 """
2203 patcher = ui.config('ui', 'patch')
2203 patcher = ui.config('ui', 'patch')
2204 if files is None:
2204 if files is None:
2205 files = set()
2205 files = set()
2206 if patcher:
2206 if patcher:
2207 return _externalpatch(ui, repo, patcher, patchname, strip,
2207 return _externalpatch(ui, repo, patcher, patchname, strip,
2208 files, similarity)
2208 files, similarity)
2209 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2209 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2210 similarity)
2210 similarity)
2211
2211
2212 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2212 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2213 backend = fsbackend(ui, repo.root)
2213 backend = fsbackend(ui, repo.root)
2214 prefix = _canonprefix(repo, prefix)
2214 prefix = _canonprefix(repo, prefix)
2215 with open(patchpath, 'rb') as fp:
2215 with open(patchpath, 'rb') as fp:
2216 changed = set()
2216 changed = set()
2217 for state, values in iterhunks(fp):
2217 for state, values in iterhunks(fp):
2218 if state == 'file':
2218 if state == 'file':
2219 afile, bfile, first_hunk, gp = values
2219 afile, bfile, first_hunk, gp = values
2220 if gp:
2220 if gp:
2221 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2221 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2222 if gp.oldpath:
2222 if gp.oldpath:
2223 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2223 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2224 prefix)[1]
2224 prefix)[1]
2225 else:
2225 else:
2226 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2226 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2227 prefix)
2227 prefix)
2228 changed.add(gp.path)
2228 changed.add(gp.path)
2229 if gp.op == 'RENAME':
2229 if gp.op == 'RENAME':
2230 changed.add(gp.oldpath)
2230 changed.add(gp.oldpath)
2231 elif state not in ('hunk', 'git'):
2231 elif state not in ('hunk', 'git'):
2232 raise error.Abort(_('unsupported parser state: %s') % state)
2232 raise error.Abort(_('unsupported parser state: %s') % state)
2233 return changed
2233 return changed
2234
2234
2235 class GitDiffRequired(Exception):
2235 class GitDiffRequired(Exception):
2236 pass
2236 pass
2237
2237
2238 diffopts = diffutil.diffallopts
2238 diffopts = diffutil.diffallopts
2239 diffallopts = diffutil.diffallopts
2239 diffallopts = diffutil.diffallopts
2240 difffeatureopts = diffutil.difffeatureopts
2240 difffeatureopts = diffutil.difffeatureopts
2241
2241
2242 def diff(repo, node1=None, node2=None, match=None, changes=None,
2242 def diff(repo, node1=None, node2=None, match=None, changes=None,
2243 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2243 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2244 hunksfilterfn=None):
2244 hunksfilterfn=None):
2245 '''yields diff of changes to files between two nodes, or node and
2245 '''yields diff of changes to files between two nodes, or node and
2246 working directory.
2246 working directory.
2247
2247
2248 if node1 is None, use first dirstate parent instead.
2248 if node1 is None, use first dirstate parent instead.
2249 if node2 is None, compare node1 with working directory.
2249 if node2 is None, compare node1 with working directory.
2250
2250
2251 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2251 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2252 every time some change cannot be represented with the current
2252 every time some change cannot be represented with the current
2253 patch format. Return False to upgrade to git patch format, True to
2253 patch format. Return False to upgrade to git patch format, True to
2254 accept the loss or raise an exception to abort the diff. It is
2254 accept the loss or raise an exception to abort the diff. It is
2255 called with the name of current file being diffed as 'fn'. If set
2255 called with the name of current file being diffed as 'fn'. If set
2256 to None, patches will always be upgraded to git format when
2256 to None, patches will always be upgraded to git format when
2257 necessary.
2257 necessary.
2258
2258
2259 prefix is a filename prefix that is prepended to all filenames on
2259 prefix is a filename prefix that is prepended to all filenames on
2260 display (used for subrepos).
2260 display (used for subrepos).
2261
2261
2262 relroot, if not empty, must be normalized with a trailing /. Any match
2262 relroot, if not empty, must be normalized with a trailing /. Any match
2263 patterns that fall outside it will be ignored.
2263 patterns that fall outside it will be ignored.
2264
2264
2265 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2265 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2266 information.
2266 information.
2267
2267
2268 hunksfilterfn, if not None, should be a function taking a filectx and
2268 hunksfilterfn, if not None, should be a function taking a filectx and
2269 hunks generator that may yield filtered hunks.
2269 hunks generator that may yield filtered hunks.
2270 '''
2270 '''
2271 if not node1 and not node2:
2272 node1 = repo.dirstate.p1()
2273
2274 ctx1 = repo[node1]
2275 ctx2 = repo[node2]
2276
2271 for fctx1, fctx2, hdr, hunks in diffhunks(
2277 for fctx1, fctx2, hdr, hunks in diffhunks(
2272 repo, node1=node1, node2=node2,
2278 repo, ctx1=ctx1, ctx2=ctx2,
2273 match=match, changes=changes, opts=opts,
2279 match=match, changes=changes, opts=opts,
2274 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2280 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2275 ):
2281 ):
2276 if hunksfilterfn is not None:
2282 if hunksfilterfn is not None:
2277 # If the file has been removed, fctx2 is None; but this should
2283 # If the file has been removed, fctx2 is None; but this should
2278 # not occur here since we catch removed files early in
2284 # not occur here since we catch removed files early in
2279 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2285 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2280 assert fctx2 is not None, \
2286 assert fctx2 is not None, \
2281 'fctx2 unexpectly None in diff hunks filtering'
2287 'fctx2 unexpectly None in diff hunks filtering'
2282 hunks = hunksfilterfn(fctx2, hunks)
2288 hunks = hunksfilterfn(fctx2, hunks)
2283 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2289 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2284 if hdr and (text or len(hdr) > 1):
2290 if hdr and (text or len(hdr) > 1):
2285 yield '\n'.join(hdr) + '\n'
2291 yield '\n'.join(hdr) + '\n'
2286 if text:
2292 if text:
2287 yield text
2293 yield text
2288
2294
2289 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2295 def diffhunks(repo, ctx1, ctx2, match=None, changes=None,
2290 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2296 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2291 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2297 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2292 where `header` is a list of diff headers and `hunks` is an iterable of
2298 where `header` is a list of diff headers and `hunks` is an iterable of
2293 (`hunkrange`, `hunklines`) tuples.
2299 (`hunkrange`, `hunklines`) tuples.
2294
2300
2295 See diff() for the meaning of parameters.
2301 See diff() for the meaning of parameters.
2296 """
2302 """
2297
2303
2298 if opts is None:
2304 if opts is None:
2299 opts = mdiff.defaultopts
2305 opts = mdiff.defaultopts
2300
2306
2301 if not node1 and not node2:
2302 node1 = repo.dirstate.p1()
2303
2304 def lrugetfilectx():
2307 def lrugetfilectx():
2305 cache = {}
2308 cache = {}
2306 order = collections.deque()
2309 order = collections.deque()
2307 def getfilectx(f, ctx):
2310 def getfilectx(f, ctx):
2308 fctx = ctx.filectx(f, filelog=cache.get(f))
2311 fctx = ctx.filectx(f, filelog=cache.get(f))
2309 if f not in cache:
2312 if f not in cache:
2310 if len(cache) > 20:
2313 if len(cache) > 20:
2311 del cache[order.popleft()]
2314 del cache[order.popleft()]
2312 cache[f] = fctx.filelog()
2315 cache[f] = fctx.filelog()
2313 else:
2316 else:
2314 order.remove(f)
2317 order.remove(f)
2315 order.append(f)
2318 order.append(f)
2316 return fctx
2319 return fctx
2317 return getfilectx
2320 return getfilectx
2318 getfilectx = lrugetfilectx()
2321 getfilectx = lrugetfilectx()
2319
2322
2320 ctx1 = repo[node1]
2321 ctx2 = repo[node2]
2322
2323 if relroot:
2323 if relroot:
2324 relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
2324 relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
2325 match = matchmod.intersectmatchers(match, relrootmatch)
2325 match = matchmod.intersectmatchers(match, relrootmatch)
2326
2326
2327 if not changes:
2327 if not changes:
2328 changes = ctx1.status(ctx2, match=match)
2328 changes = ctx1.status(ctx2, match=match)
2329 modified, added, removed = changes[:3]
2329 modified, added, removed = changes[:3]
2330
2330
2331 if not modified and not added and not removed:
2331 if not modified and not added and not removed:
2332 return []
2332 return []
2333
2333
2334 if repo.ui.debugflag:
2334 if repo.ui.debugflag:
2335 hexfunc = hex
2335 hexfunc = hex
2336 else:
2336 else:
2337 hexfunc = short
2337 hexfunc = short
2338 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2338 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2339
2339
2340 if copy is None:
2340 if copy is None:
2341 copy = {}
2341 copy = {}
2342 if opts.git or opts.upgrade:
2342 if opts.git or opts.upgrade:
2343 copy = copies.pathcopies(ctx1, ctx2, match=match)
2343 copy = copies.pathcopies(ctx1, ctx2, match=match)
2344
2344
2345 if relroot:
2345 if relroot:
2346 # filter out copies where source side isn't inside the relative root
2346 # filter out copies where source side isn't inside the relative root
2347 # (copies.pathcopies() already filtered out the destination)
2347 # (copies.pathcopies() already filtered out the destination)
2348 copy = {dst: src for dst, src in copy.iteritems()
2348 copy = {dst: src for dst, src in copy.iteritems()
2349 if src.startswith(relroot)}
2349 if src.startswith(relroot)}
2350
2350
2351 modifiedset = set(modified)
2351 modifiedset = set(modified)
2352 addedset = set(added)
2352 addedset = set(added)
2353 removedset = set(removed)
2353 removedset = set(removed)
2354 for f in modified:
2354 for f in modified:
2355 if f not in ctx1:
2355 if f not in ctx1:
2356 # Fix up added, since merged-in additions appear as
2356 # Fix up added, since merged-in additions appear as
2357 # modifications during merges
2357 # modifications during merges
2358 modifiedset.remove(f)
2358 modifiedset.remove(f)
2359 addedset.add(f)
2359 addedset.add(f)
2360 for f in removed:
2360 for f in removed:
2361 if f not in ctx1:
2361 if f not in ctx1:
2362 # Merged-in additions that are then removed are reported as removed.
2362 # Merged-in additions that are then removed are reported as removed.
2363 # They are not in ctx1, so We don't want to show them in the diff.
2363 # They are not in ctx1, so We don't want to show them in the diff.
2364 removedset.remove(f)
2364 removedset.remove(f)
2365 modified = sorted(modifiedset)
2365 modified = sorted(modifiedset)
2366 added = sorted(addedset)
2366 added = sorted(addedset)
2367 removed = sorted(removedset)
2367 removed = sorted(removedset)
2368 for dst, src in list(copy.items()):
2368 for dst, src in list(copy.items()):
2369 if src not in ctx1:
2369 if src not in ctx1:
2370 # Files merged in during a merge and then copied/renamed are
2370 # Files merged in during a merge and then copied/renamed are
2371 # reported as copies. We want to show them in the diff as additions.
2371 # reported as copies. We want to show them in the diff as additions.
2372 del copy[dst]
2372 del copy[dst]
2373
2373
2374 prefetchmatch = scmutil.matchfiles(
2374 prefetchmatch = scmutil.matchfiles(
2375 repo, list(modifiedset | addedset | removedset))
2375 repo, list(modifiedset | addedset | removedset))
2376 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2376 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2377
2377
2378 def difffn(opts, losedata):
2378 def difffn(opts, losedata):
2379 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2379 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2380 copy, getfilectx, opts, losedata, prefix, relroot)
2380 copy, getfilectx, opts, losedata, prefix, relroot)
2381 if opts.upgrade and not opts.git:
2381 if opts.upgrade and not opts.git:
2382 try:
2382 try:
2383 def losedata(fn):
2383 def losedata(fn):
2384 if not losedatafn or not losedatafn(fn=fn):
2384 if not losedatafn or not losedatafn(fn=fn):
2385 raise GitDiffRequired
2385 raise GitDiffRequired
2386 # Buffer the whole output until we are sure it can be generated
2386 # Buffer the whole output until we are sure it can be generated
2387 return list(difffn(opts.copy(git=False), losedata))
2387 return list(difffn(opts.copy(git=False), losedata))
2388 except GitDiffRequired:
2388 except GitDiffRequired:
2389 return difffn(opts.copy(git=True), None)
2389 return difffn(opts.copy(git=True), None)
2390 else:
2390 else:
2391 return difffn(opts, None)
2391 return difffn(opts, None)
2392
2392
2393 def diffsinglehunk(hunklines):
2393 def diffsinglehunk(hunklines):
2394 """yield tokens for a list of lines in a single hunk"""
2394 """yield tokens for a list of lines in a single hunk"""
2395 for line in hunklines:
2395 for line in hunklines:
2396 # chomp
2396 # chomp
2397 chompline = line.rstrip('\r\n')
2397 chompline = line.rstrip('\r\n')
2398 # highlight tabs and trailing whitespace
2398 # highlight tabs and trailing whitespace
2399 stripline = chompline.rstrip()
2399 stripline = chompline.rstrip()
2400 if line.startswith('-'):
2400 if line.startswith('-'):
2401 label = 'diff.deleted'
2401 label = 'diff.deleted'
2402 elif line.startswith('+'):
2402 elif line.startswith('+'):
2403 label = 'diff.inserted'
2403 label = 'diff.inserted'
2404 else:
2404 else:
2405 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2405 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2406 for token in tabsplitter.findall(stripline):
2406 for token in tabsplitter.findall(stripline):
2407 if token.startswith('\t'):
2407 if token.startswith('\t'):
2408 yield (token, 'diff.tab')
2408 yield (token, 'diff.tab')
2409 else:
2409 else:
2410 yield (token, label)
2410 yield (token, label)
2411
2411
2412 if chompline != stripline:
2412 if chompline != stripline:
2413 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2413 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2414 if chompline != line:
2414 if chompline != line:
2415 yield (line[len(chompline):], '')
2415 yield (line[len(chompline):], '')
2416
2416
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors"""
    # collect the deleted and the inserted text of the hunk
    atext = ''
    btext = ''
    for line in hunklines:
        if line[0:1] == '-':
            atext += line[1:]
        elif line[0:1] == '+':
            btext += line[1:]
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
    # fast path: with only one side present there is nothing to compare
    # against, so fall back to the plain per-line highlighting
    if not atext or not btext:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # split both sides into words, then rejoin the words as "lines" so the
    # line-based diff algorithm can be reused for a word-level diff
    awords = wordsplitter.findall(atext)
    bwords = wordsplitter.findall(btext)
    alines = [w if w == '\n' else w + '\n' for w in awords]
    blines = [w if w == '\n' else w + '\n' for w in bwords]
    # run the diff algorithm, prepare atokens and btokens
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(''.join(alines), ''.join(blines),
                             lines1=alines, lines2=blines)
    for (a1, a2, b1, b2), btype in blocks:
        changed = btype == '!'
        for token in mdiff.splitnewlines(''.join(awords[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(''.join(bwords[b1:b2])):
            btokens.append((changed, token))

    # emit all deleted tokens first, then all inserted ones
    for prefix, label, tokens in [('-', 'diff.deleted', atokens),
                                  ('+', 'diff.inserted', btokens)]:
        atlinestart = True
        for changed, token in tokens:
            if atlinestart:
                yield (prefix, label)
                atlinestart = False
            # split a token that ends a line into content, trailing spaces
            # and the line ending, so each part gets its own label
            isendofline = token.endswith('\n')
            if isendofline:
                chomp = token[:-1]  # chomp
                if chomp.endswith('\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp):]
                token = chomp.rstrip()  # detect spaces at the end
                endspaces = chomp[len(token):]
            # tabs are labelled separately; everything else is marked
            # changed or unchanged
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = 'diff.tab'
                elif changed:
                    currentlabel = label + '.changed'
                else:
                    currentlabel = label + '.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, 'diff.trailingwhitespace')
                yield (endofline, '')
                atlinestart = True
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    # - and + are handled by diffsinglehunk
                    ]
    head = False

    # adjacent "-"/"+" lines are buffered so a whole hunk can be tokenized
    # at once (required for word-level highlighting)
    hunkbuffer = []
    def consumehunkbuffer():
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            # track whether we are inside a file header; the header ends
            # at the first hunk ("@") line
            if head:
                if line.startswith('@'):
                    head = False
            elif line and not line.startswith((' ', '+', '-', '@', '\\')):
                head = True
            diffline = not head and line.startswith(('+', '-'))

            if diffline:
                # buffered: restore the newline that split() removed,
                # except on the final (possibly incomplete) line
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += "\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered: a non-hunk line flushes any pending hunk
                for token in consumehunkbuffer():
                    yield token
                prefixes = headprefixes if head else textprefixes
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (line[len(stripline):],
                                   'diff.trailingwhitespace')
                        break
                else:
                    yield (line, '')
                if i + 1 < linecount:
                    yield ('\n', '')
    for token in consumehunkbuffer():
        yield token
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # thin wrapper: let difflabel() drive diff() and attach color labels
    return difflabel(diff, *args, **kw)
2561 def _filepairs(modified, added, removed, copy, opts):
2561 def _filepairs(modified, added, removed, copy, opts):
2562 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2562 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2563 before and f2 is the the name after. For added files, f1 will be None,
2563 before and f2 is the the name after. For added files, f1 will be None,
2564 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2564 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2565 or 'rename' (the latter two only if opts.git is set).'''
2565 or 'rename' (the latter two only if opts.git is set).'''
2566 gone = set()
2566 gone = set()
2567
2567
2568 copyto = dict([(v, k) for k, v in copy.items()])
2568 copyto = dict([(v, k) for k, v in copy.items()])
2569
2569
2570 addedset, removedset = set(added), set(removed)
2570 addedset, removedset = set(added), set(removed)
2571
2571
2572 for f in sorted(modified + added + removed):
2572 for f in sorted(modified + added + removed):
2573 copyop = None
2573 copyop = None
2574 f1, f2 = f, f
2574 f1, f2 = f, f
2575 if f in addedset:
2575 if f in addedset:
2576 f1 = None
2576 f1 = None
2577 if f in copy:
2577 if f in copy:
2578 if opts.git:
2578 if opts.git:
2579 f1 = copy[f]
2579 f1 = copy[f]
2580 if f1 in removedset and f1 not in gone:
2580 if f1 in removedset and f1 not in gone:
2581 copyop = 'rename'
2581 copyop = 'rename'
2582 gone.add(f1)
2582 gone.add(f1)
2583 else:
2583 else:
2584 copyop = 'copy'
2584 copyop = 'copy'
2585 elif f in removedset:
2585 elif f in removedset:
2586 f2 = None
2586 f2 = None
2587 if opts.git:
2587 if opts.git:
2588 # have we already reported a copy above?
2588 # have we already reported a copy above?
2589 if (f in copyto and copyto[f] in addedset
2589 if (f in copyto and copyto[f] in addedset
2590 and copy[copyto[f]] == f):
2590 and copy[copyto[f]] == f):
2591 continue
2591 continue
2592 yield f1, f2, copyop
2592 yield f1, f2, copyop
2593
2593
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git-style blob id: sha1 over a "blob <size>\0" header + content
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) "diff -r REV1 -r REV2 file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        # fctx is None when the file is absent on that side
        return fctx is None or fctx.size() == 0

    date1 = dateutil.datestr(ctx1.date())
    date2 = dateutil.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: every incoming path must live under relroot
    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        # yes       | no   no       no  *     | summary   | no
        # yes       | no   no       yes *     | base85    | yes
        # yes       | no   yes      no  *     | summary   | no
        # yes       | no   yes      yes 0     | summary   | no
        # yes       | no   yes      yes >0    | summary   | semi [1]
        # yes       | yes  *        *   *     | text diff | yes
        # no        | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
def diffstatsum(stats):
    """reduce (filename, adds, removes, isbinary) entries to the aggregate
    values needed to render a diffstat histogram"""
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for f, a, r, b in stats:
        # column widths track the widest name and the largest change count
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary
def diffstatdata(lines):
    """parse diff text into a list of (filename, adds, removes, isbinary)"""
    diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats of the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff: reset counters and enter the header
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
        elif line.startswith('rename from'):
            filename = line[12:]
        elif line.startswith('rename to'):
            filename += ' => %s' % line[10:]
    addresult()
    return results
def diffstat(lines, width=80):
    """render diff text as a classic diffstat histogram, `width` cols wide"""
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # width of the count column; 'Bin' needs at least three characters
    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    # room left for the +/- graph after name, count and separators
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s |  %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # a graph line: split off the +/- runs and label them
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            inserted = re.search(br'\++', graph)
            if inserted:
                yield (inserted.group(0), 'diffstat.inserted')
            deleted = re.search(br'-+', graph)
            if deleted:
                yield (deleted.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now