##// END OF EJS Templates
diff: also yield file context objects in patch.trydiff() (API)...
Denis Laxalde -
r34856:35c6a54e default
parent child Browse files
Show More
@@ -1,643 +1,643 b''
1 # hgweb/webutil.py - utility library for the web interface.
1 # hgweb/webutil.py - utility library for the web interface.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import copy
11 import copy
12 import difflib
12 import difflib
13 import os
13 import os
14 import re
14 import re
15
15
16 from ..i18n import _
16 from ..i18n import _
17 from ..node import hex, nullid, short
17 from ..node import hex, nullid, short
18
18
19 from .common import (
19 from .common import (
20 ErrorResponse,
20 ErrorResponse,
21 HTTP_BAD_REQUEST,
21 HTTP_BAD_REQUEST,
22 HTTP_NOT_FOUND,
22 HTTP_NOT_FOUND,
23 paritygen,
23 paritygen,
24 )
24 )
25
25
26 from .. import (
26 from .. import (
27 context,
27 context,
28 error,
28 error,
29 match,
29 match,
30 mdiff,
30 mdiff,
31 patch,
31 patch,
32 pathutil,
32 pathutil,
33 pycompat,
33 pycompat,
34 templatefilters,
34 templatefilters,
35 ui as uimod,
35 ui as uimod,
36 util,
36 util,
37 )
37 )
38
38
def up(p):
    """Return the parent directory of path *p*, with a trailing slash.

    The path is normalized to begin with "/" and lose any trailing "/"
    before taking os.path.dirname; the parent of a top-level entry is
    returned as "/" itself.
    """
    normalized = p if p[0] == "/" else "/" + p
    if normalized.endswith("/"):
        normalized = normalized[:-1]
    parent = os.path.dirname(normalized)
    if parent == "/":
        return "/"
    return parent + "/"
48
48
49 def _navseq(step, firststep=None):
49 def _navseq(step, firststep=None):
50 if firststep:
50 if firststep:
51 yield firststep
51 yield firststep
52 if firststep >= 20 and firststep <= 40:
52 if firststep >= 20 and firststep <= 40:
53 firststep = 50
53 firststep = 50
54 yield firststep
54 yield firststep
55 assert step > 0
55 assert step > 0
56 assert firststep > 0
56 assert firststep > 0
57 while step <= firststep:
57 while step <= firststep:
58 step *= 10
58 step *= 10
59 while True:
59 while True:
60 yield 1 * step
60 yield 1 * step
61 yield 3 * step
61 yield 3 * step
62 step *= 10
62 step *= 10
63
63
class revnav(object):
    """Generate "before"/"after" navigation links over a changelog."""

    def __init__(self, repo):
        """Navigation generation object

        :repo: repo object we generate nav for
        """
        # used for hex generation
        self._revlog = repo.changelog

    def __nonzero__(self):
        """return True if any revision to navigate over"""
        return self._first() is not None

    # Python 3 spelling of the truthiness hook above
    __bool__ = __nonzero__

    def _first(self):
        """return the minimum non-filtered changeset or None"""
        try:
            return next(iter(self._revlog))
        except StopIteration:
            # revlog is empty (or entirely filtered)
            return None

    def hex(self, rev):
        # map a revision number to its hex node id
        return hex(self._revlog.node(rev))

    def gen(self, pos, pagelen, limit):
        """computes label and revision id for navigation link

        :pos: is the revision relative to which we generate navigation.
        :pagelen: the size of each navigation page
        :limit: how far shall we link

        The return is:
            - a single element tuple
            - containing a dictionary with a `before` and `after` key
            - values are generator functions taking arbitrary number of kwargs
            - yield items are dictionaries with `label` and `node` keys
        """
        if not self:
            # empty repo
            return ({'before': (), 'after': ()},)

        # candidate revisions at increasing distances on both sides of pos
        targets = []
        for f in _navseq(1, pagelen):
            if f > limit:
                break
            targets.append(pos + f)
            targets.append(pos - f)
        targets.sort()

        first = self._first()
        # the earliest revision is always offered as a "before" link
        navbefore = [("(%i)" % first, self.hex(first))]
        navafter = []
        for rev in targets:
            if rev not in self._revlog:
                # skip filtered or out-of-range candidates
                continue
            if pos < rev < limit:
                navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
            if 0 < rev < pos:
                navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))

        navafter.append(("tip", "tip"))

        data = lambda i: {"label": i[0], "node": i[1]}
        return ({'before': lambda **map: (data(i) for i in navbefore),
                 'after': lambda **map: (data(i) for i in navafter)},)
132
132
class filerevnav(revnav):
    """Navigation over the revision history of a single file."""

    def __init__(self, repo, path):
        """Navigation generation object

        :repo: repo object we generate nav for
        :path: path of the file we generate nav for
        """
        # used for hex generation
        self._revlog = repo.file(path)
        # used for iteration; unfiltered because linkrevs may point at
        # revisions hidden in the filtered view
        self._changelog = repo.unfiltered().changelog

    def hex(self, rev):
        """Map a filelog revision to the hex id of its linked changeset."""
        linked = self._revlog.linkrev(rev)
        return hex(self._changelog.node(linked))
148
148
149 class _siblings(object):
149 class _siblings(object):
150 def __init__(self, siblings=None, hiderev=None):
150 def __init__(self, siblings=None, hiderev=None):
151 if siblings is None:
151 if siblings is None:
152 siblings = []
152 siblings = []
153 self.siblings = [s for s in siblings if s.node() != nullid]
153 self.siblings = [s for s in siblings if s.node() != nullid]
154 if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
154 if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
155 self.siblings = []
155 self.siblings = []
156
156
157 def __iter__(self):
157 def __iter__(self):
158 for s in self.siblings:
158 for s in self.siblings:
159 d = {
159 d = {
160 'node': s.hex(),
160 'node': s.hex(),
161 'rev': s.rev(),
161 'rev': s.rev(),
162 'user': s.user(),
162 'user': s.user(),
163 'date': s.date(),
163 'date': s.date(),
164 'description': s.description(),
164 'description': s.description(),
165 'branch': s.branch(),
165 'branch': s.branch(),
166 }
166 }
167 if util.safehasattr(s, 'path'):
167 if util.safehasattr(s, 'path'):
168 d['file'] = s.path()
168 d['file'] = s.path()
169 yield d
169 yield d
170
170
171 def __len__(self):
171 def __len__(self):
172 return len(self.siblings)
172 return len(self.siblings)
173
173
def difffeatureopts(req, ui, section):
    """Build diff feature options for *section*, honoring request overrides.

    Whitespace-handling options may be toggled per-request through form
    parameters; a value that does not parse as a boolean enables the
    option.
    """
    opts = patch.difffeatureopts(ui, untrusted=True,
                                 section=section, whitespace=True)

    whitespaceopts = ('ignorews', 'ignorewsamount', 'ignorewseol',
                      'ignoreblanklines')
    for name in whitespaceopts:
        raw = req.form.get(name, [None])[0]
        if raw is None:
            continue
        parsed = util.parsebool(raw)
        setattr(opts, name, True if parsed is None else parsed)

    return opts
185
185
def annotate(req, fctx, ui):
    """Annotate *fctx*, using diff options from the 'annotate' section."""
    opts = difffeatureopts(req, ui, 'annotate')
    return fctx.annotate(follow=True, linenumber=True, diffopts=opts)
189
189
def parents(ctx, hide=None):
    """Return the parents of *ctx* wrapped in _siblings.

    For a file context whose linked changeset differs from the one that
    introduced the file revision, the introducing changeset is returned
    instead of the file's parents.
    """
    if not isinstance(ctx, context.basefilectx):
        return _siblings(ctx.parents(), hide)
    introrev = ctx.introrev()
    if ctx.changectx().rev() != introrev:
        return _siblings([ctx.repo()[introrev]], hide)
    return _siblings(ctx.parents(), hide)
196
196
def children(ctx, hide=None):
    """Return the children of *ctx* wrapped in _siblings."""
    kids = ctx.children()
    return _siblings(kids, hide)
199
199
def renamelink(fctx):
    """Return rename/copy source info for *fctx* as a template list.

    Contains a single {file, node} entry when the file context records
    a copy source, otherwise it is empty.
    """
    renamed = fctx.renamed()
    if not renamed:
        return []
    srcpath, srcnode = renamed[0], renamed[1]
    return [{'file': srcpath, 'node': hex(srcnode)}]
205
205
def nodetagsdict(repo, node):
    """List the tags on *node* as template-friendly {name} dicts."""
    return [{"name": tag} for tag in repo.nodetags(node)]
208
208
def nodebookmarksdict(repo, node):
    """List the bookmarks on *node* as template-friendly {name} dicts."""
    return [{"name": mark} for mark in repo.nodebookmarks(node)]
211
211
def nodebranchdict(repo, ctx):
    """Return [{name: branch}] when *ctx* is the tip of its branch.

    If this is an empty repo, ctx.node() == nullid and
    ctx.branch() == 'default'.
    """
    branch = ctx.branch()
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    if tipnode != ctx.node():
        return []
    return [{"name": branch}]
224
224
def nodeinbranch(repo, ctx):
    """Return [{name: branch}] for a named branch unless *ctx* is its tip.

    The 'default' branch is never reported, and neither is the branch
    tip itself (which nodebranchdict covers).
    """
    branch = ctx.branch()
    try:
        tipnode = repo.branchtip(branch)
    except error.RepoLookupError:
        tipnode = None
    if branch == 'default' or tipnode == ctx.node():
        return []
    return [{"name": branch}]
235
235
def nodebranchnodefault(ctx):
    """Return [{name: branch}] unless *ctx* is on the default branch."""
    branch = ctx.branch()
    if branch == 'default':
        return []
    return [{"name": branch}]
242
242
def showtag(repo, tmpl, t1, node=nullid, **args):
    """Render template *t1* once per tag attached to *node*."""
    for tag in repo.nodetags(node):
        yield tmpl(t1, tag=tag, **args)
246
246
def showbookmark(repo, tmpl, t1, node=nullid, **args):
    """Render template *t1* once per bookmark attached to *node*."""
    for mark in repo.nodebookmarks(node):
        yield tmpl(t1, bookmark=mark, **args)
250
250
def branchentries(repo, stripecount, limit=0):
    """Return a template generator over branch tips.

    Branches are sorted open-before-closed, then by descending tip
    revision; at most *limit* entries are produced when limit > 0.
    """
    tips = []
    heads = repo.heads()
    parity = paritygen(stripecount)
    # sort key: open branches first, then by tip revision
    sortkey = lambda item: (not item[1], item[0].rev())

    def entries(**map):
        count = 0
        if not tips:
            # lazily collect (tipctx, closed) pairs on first invocation;
            # later calls reuse the cached list
            for tag, hs, tip, closed in repo.branchmap().iterbranches():
                tips.append((repo[tip], closed))
        for ctx, closed in sorted(tips, key=sortkey, reverse=True):
            if limit > 0 and count >= limit:
                return
            count += 1
            if closed:
                status = 'closed'
            elif ctx.node() not in heads:
                status = 'inactive'
            else:
                status = 'open'
            yield {
                'parity': next(parity),
                'branch': ctx.branch(),
                'status': status,
                'node': ctx.hex(),
                'date': ctx.date()
            }

    return entries
281
281
def cleanpath(repo, path):
    """Canonicalize a request *path* relative to the repository root."""
    return pathutil.canonpath(repo.root, '', path.lstrip('/'))
285
285
def changeidctx(repo, changeid):
    """Resolve *changeid* to a changectx, falling back to manifest ids.

    When the id does not name a changeset, interpret it as a manifest
    node and return the changeset that introduced that manifest.
    """
    try:
        return repo[changeid]
    except error.RepoError:
        manifest = repo.manifestlog._revlog
        rev = manifest.linkrev(manifest.rev(manifest.lookup(changeid)))
        return repo[rev]
294
294
def changectx(repo, req):
    """Return the changectx named by the request (default: tip).

    A 'node' parameter of the form "base:rev" selects the part after
    the colon; a 'manifest' parameter is honored when no 'node' is
    present.
    """
    if 'node' in req.form:
        changeid = req.form['node'][0]
        # "base:rev" requests diff against base; the context itself is rev
        sep = changeid.find(':')
        if sep != -1:
            changeid = changeid[sep + 1:]
    elif 'manifest' in req.form:
        changeid = req.form['manifest'][0]
    else:
        changeid = "tip"
    return changeidctx(repo, changeid)
306
306
def basechangectx(repo, req):
    """Return the base changectx of a "base:rev" node request.

    Returns None when the request carries no 'node' parameter; without
    a colon the whole node id is used as the base.
    """
    if 'node' not in req.form:
        return None
    changeid = req.form['node'][0]
    sep = changeid.find(':')
    if sep != -1:
        changeid = changeid[:sep]
    return changeidctx(repo, changeid)
316
316
def filectx(repo, req):
    """Return the filectx selected by the request's file/node parameters.

    Raises ErrorResponse(HTTP_NOT_FOUND) when the request names no file
    or no revision.
    """
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = cleanpath(repo, req.form['file'][0])
    for key in ('node', 'filenode'):
        if key in req.form:
            changeid = req.form[key][0]
            break
    else:
        raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
    try:
        # interpret the id as a changeset first...
        return repo[changeid][path]
    except error.RepoError:
        # ...then fall back to a file revision id
        return repo.filectx(path, fileid=changeid)
333
333
def linerange(req):
    """Parse the request's "fromline:toline" linerange parameter.

    Returns None when the parameter is absent; raises
    ErrorResponse(HTTP_BAD_REQUEST) when it is repeated or malformed.
    """
    values = req.form.get('linerange')
    if values is None:
        return None
    if len(values) > 1:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'redundant linerange parameter')
    try:
        fromline, toline = map(int, values[0].split(':', 1))
    except ValueError:
        raise ErrorResponse(HTTP_BAD_REQUEST,
                            'invalid linerange parameter')
    try:
        # normalize to a 0-based, validated range
        return util.processlinerange(fromline, toline)
    except error.ParseError as exc:
        raise ErrorResponse(HTTP_BAD_REQUEST, str(exc))
350
350
def formatlinerange(fromline, toline):
    """Format a 0-based line range as 1-based "from:to" text."""
    start = fromline + 1
    return '%d:%d' % (start, toline)
353
353
def commonentry(repo, ctx):
    """Return the template keywords shared by all changeset-like pages."""
    node = ctx.node()
    entry = {
        'rev': ctx.rev(),
        'node': hex(node),
        'author': ctx.user(),
        'desc': ctx.description(),
        'date': ctx.date(),
        'extra': ctx.extra(),
        'phase': ctx.phasestr(),
    }
    # branch/tag/bookmark decorations
    entry['branch'] = nodebranchnodefault(ctx)
    entry['inbranch'] = nodeinbranch(repo, ctx)
    entry['branches'] = nodebranchdict(repo, ctx)
    entry['tags'] = nodetagsdict(repo, node)
    entry['bookmarks'] = nodebookmarksdict(repo, node)
    # lazy generators; templates call these with keyword arguments
    entry['parent'] = lambda **x: parents(ctx)
    entry['child'] = lambda **x: children(ctx)
    return entry
372
372
def changelistentry(web, ctx, tmpl):
    """Obtain a dictionary to be used for entries in a changelist.

    This function is called when producing items for the "entries" list
    passed to the "shortlog" and "changelog" templates.
    """
    repo = web.repo
    rev = ctx.rev()
    node = ctx.node()

    entry = commonentry(repo, ctx)
    # changelist pages hide the trivial parent/child (rev +/- 1) links
    # and add per-entry tag and file-diff generators
    entry.update({
        'allparents': lambda **x: parents(ctx),
        'parent': lambda **x: parents(ctx, rev - 1),
        'child': lambda **x: children(ctx, rev + 1),
        'changelogtag': showtag(repo, tmpl, 'changelogtag', node),
        'files': listfilediffs(tmpl, ctx.files(), node, web.maxfiles),
    })
    return entry
394
394
def symrevorshortnode(req, ctx):
    """Return the request's symbolic revision, or a short node id."""
    if 'node' not in req.form:
        return short(ctx.node())
    return templatefilters.revescape(req.form['node'][0])
400
400
def changesetentry(web, req, tmpl, ctx):
    '''Obtain a dictionary to be used to render the "changeset" template.'''

    showtags = showtag(web.repo, tmpl, 'changesettag', ctx.node())
    showbookmarks = showbookmark(web.repo, tmpl, 'changesetbookmark',
                                 ctx.node())
    showbranch = nodebranchnodefault(ctx)

    # one template entry per touched file; files absent from the
    # changeset (e.g. removed ones) get the no-link variant
    files = []
    parity = paritygen(web.stripecount)
    for blockno, f in enumerate(ctx.files()):
        template = f in ctx and 'filenodelink' or 'filenolink'
        files.append(tmpl(template,
                          node=ctx.hex(), file=f, blockno=blockno + 1,
                          parity=next(parity)))

    # diff against an explicitly requested base, or p1 by default
    basectx = basechangectx(web.repo, req)
    if basectx is None:
        basectx = ctx.p1()

    style = web.config('web', 'style')
    if 'style' in req.form:
        style = req.form['style'][0]

    diff = diffs(web, tmpl, ctx, basectx, None, style)

    # diffstatsgen is shared between the eager diffstat below and the
    # lazy diffsummary lambda; it re-yields the same computed data
    parity = paritygen(web.stripecount)
    diffstatsgen = diffstatgen(ctx, basectx)
    diffstats = diffstat(tmpl, ctx, diffstatsgen, parity)

    return dict(
        diff=diff,
        symrev=symrevorshortnode(req, ctx),
        basenode=basectx.hex(),
        changesettag=showtags,
        changesetbookmark=showbookmarks,
        changesetbranch=showbranch,
        files=files,
        diffsummary=lambda **x: diffsummary(diffstatsgen),
        diffstat=diffstats,
        archives=web.archivelist(ctx.hex()),
        **commonentry(web.repo, ctx))
443
443
def listfilediffs(tmpl, files, node, max):
    """Yield file-diff link templates for at most *max* files.

    A trailing ellipsis entry marks any files left out.
    """
    nodehex = hex(node)
    for filename in files[:max]:
        yield tmpl('filedifflink', node=nodehex, file=filename)
    if len(files) > max:
        yield tmpl('fileellipses')
449
449
def diffs(web, tmpl, ctx, basectx, files, style, linerange=None,
          lineidprefix=''):
    """Yield 'diffblock' templates for the diff from *basectx* to *ctx*.

    :files: optional list restricting the diff to those exact paths
    :style: 'raw' keeps the first header line, other styles drop it
    :linerange: optional (fromline, toline) filter on hunk ranges
    :lineidprefix: prefix for per-line anchor ids
    """

    def prettyprintlines(lines, blockno):
        # classify each diff line (+/-/@/context) for template styling
        for lineno, l in enumerate(lines, 1):
            difflineno = "%d.%d" % (blockno, lineno)
            if l.startswith('+'):
                ltype = "difflineplus"
            elif l.startswith('-'):
                ltype = "difflineminus"
            elif l.startswith('@'):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineno=lineno,
                       lineid=lineidprefix + "l%s" % difflineno,
                       linenumber="% 8s" % difflineno)

    repo = web.repo
    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    node1 = basectx.node()
    node2 = ctx.node()
    parity = paritygen(web.stripecount)

    diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts)
    # diffhunks items are (fctx1, fctx2, header, hunks); the file
    # contexts are not needed here
    for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
        if style != 'raw':
            header = header[1:]
        lines = [h + '\n' for h in header]
        for hunkrange, hunklines in hunks:
            if linerange is not None and hunkrange is not None:
                s1, l1, s2, l2 = hunkrange
                # drop hunks not touching the requested line range
                if not mdiff.hunkinrange((s2, l2), linerange):
                    continue
            lines.extend(hunklines)
        if lines:
            yield tmpl('diffblock', parity=next(parity), blockno=blockno,
                       lines=prettyprintlines(lines, blockno))
495
495
def compare(tmpl, context, leftlines, rightlines):
    '''Generator function that provides side-by-side comparison data.'''

    def compline(type, leftlineno, leftline, rightlineno, rightline):
        # anchor id is "lN", "rN", or both when the line exists on
        # both sides
        lineid = leftlineno and ("l%s" % leftlineno) or ''
        lineid += rightlineno and ("r%s" % rightlineno) or ''
        return tmpl('comparisonline',
                    type=type,
                    lineid=lineid,
                    leftlineno=leftlineno,
                    leftlinenumber="% 6s" % (leftlineno or ''),
                    leftline=leftline or '',
                    rightlineno=rightlineno,
                    rightlinenumber="% 6s" % (rightlineno or ''),
                    rightline=rightline or '')

    def getblock(opcodes):
        # walk SequenceMatcher opcodes: pair lines while both sides
        # have content, then emit one-sided lines for the surplus
        for type, llo, lhi, rlo, rhi in opcodes:
            len1 = lhi - llo
            len2 = rhi - rlo
            count = min(len1, len2)
            for i in xrange(count):
                yield compline(type=type,
                               leftlineno=llo + i + 1,
                               leftline=leftlines[llo + i],
                               rightlineno=rlo + i + 1,
                               rightline=rightlines[rlo + i])
            if len1 > len2:
                # left-only surplus: deletions
                for i in xrange(llo + count, lhi):
                    yield compline(type=type,
                                   leftlineno=i + 1,
                                   leftline=leftlines[i],
                                   rightlineno=None,
                                   rightline=None)
            elif len2 > len1:
                # right-only surplus: insertions
                for i in xrange(rlo + count, rhi):
                    yield compline(type=type,
                                   leftlineno=None,
                                   leftline=None,
                                   rightlineno=i + 1,
                                   rightline=rightlines[i])

    s = difflib.SequenceMatcher(None, leftlines, rightlines)
    if context < 0:
        # negative context: one block covering the whole files
        yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
    else:
        for oc in s.get_grouped_opcodes(n=context):
            yield tmpl('comparisonblock', lines=getblock(oc))
544
544
def diffstatgen(ctx, basectx):
    '''Generator function that provides the diffstat data.'''

    # computed once; the infinite loop lets multiple consumers each call
    # next() on a shared generator and observe the same data
    stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
    maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
    while True:
        yield stats, maxname, maxtotal, addtotal, removetotal, binary
552
552
def diffsummary(statgen):
    """Return a one-line textual summary of the diffstat from *statgen*."""
    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    msg = _(' %d files changed, %d insertions(+), %d deletions(-)\n')
    return msg % (len(stats), addtotal, removetotal)
559
559
def diffstat(tmpl, ctx, statgen, parity):
    """Yield a diffstat template entry for each file in the diff."""
    stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
    changedfiles = ctx.files()

    def pct(count):
        # percentage of the largest per-file total, used for bar widths
        if maxtotal == 0:
            return 0
        return (float(count) / maxtotal) * 100

    for fileno, (filename, adds, removes, isbinary) in enumerate(stats, 1):
        if filename in changedfiles:
            template = 'diffstatlink'
        else:
            template = 'diffstatnolink'
        yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
                   total=adds + removes, addpct=pct(adds),
                   removepct=pct(removes), parity=next(parity))
579
579
class sessionvars(object):
    """Mapping of query-string variables for URL generation.

    Iterating yields one dict per variable ({'name', 'value',
    'separator'}) in sorted key order; the first entry carries the
    'start' separator (default '?'), subsequent ones '&'.
    """
    def __init__(self, vars, start='?'):
        self.start = start
        self.vars = vars

    def __getitem__(self, key):
        return self.vars[key]

    def __setitem__(self, key, value):
        self.vars[key] = value

    def __copy__(self):
        # shallow-copy the variable dict so the copy mutates independently
        return sessionvars(copy.copy(self.vars), self.start)

    def __iter__(self):
        sep = self.start
        for key, value in sorted(self.vars.iteritems()):
            yield {'name': key,
                   'value': pycompat.bytestr(value),
                   'separator': sep,
                   }
            sep = '&'
598
598
class wsgiui(uimod.ui):
    """ui subclass used when serving over WSGI."""
    # default termwidth breaks under mod_wsgi, so pin a fixed width
    def termwidth(self):
        return 80
603
603
def getwebsubs(repo):
    """Build the websub substitution table from the repo configuration.

    Reads [websub] entries (plus legacy [interhg] ones) of the form
    's<delim>regexp<delim>format<delim>[flags]' where <delim> is an
    arbitrary single character, and returns a list of
    (compiled_regexp, format) pairs.  Invalid entries are reported via
    ui.warn() and skipped.
    """
    websubtable = []
    websubdefs = repo.ui.configitems('websub')
    # we must maintain interhg backwards compatibility
    websubdefs += repo.ui.configitems('interhg')
    for key, pattern in websubdefs:
        # grab the delimiter from the character after the "s"
        unesc = pattern[1]
        delim = re.escape(unesc)

        # identify portions of the pattern, taking care to avoid escaped
        # delimiters. the replace format and flags are optional, but
        # delimiters are required.
        match = re.match(
            r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
            % (delim, delim, delim), pattern)
        if not match:
            repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
                         % (key, pattern))
            continue

        # we need to unescape the delimiter for regexp and format
        delim_re = re.compile(r'(?<!\\)\\%s' % delim)
        regexp = delim_re.sub(unesc, match.group(1))
        format = delim_re.sub(unesc, match.group(2))

        # the pattern allows for 6 regexp flags, so set them if necessary
        flagin = match.group(3)
        flags = 0
        if flagin:
            for flag in flagin.upper():
                flags |= re.__dict__[flag]

        try:
            regexp = re.compile(regexp, flags)
            websubtable.append((regexp, format))
        except re.error:
            # the user-supplied regexp failed to compile; warn and skip
            repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
                         % (key, regexp))
    return websubtable
@@ -1,2797 +1,2798 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 policy,
35 policy,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42
42
# hunk-matching helpers loaded through the module policy
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# matches the header line of a git-style diff and captures both paths
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and non-tabs
tabsplitter = re.compile(br'(\t+|[^\t]+)')

# alias re-exported here for convenience / backwards compatibility
PatchError = error.PatchError
50
50
51 # public functions
51 # public functions
52
52
def split(stream):
    '''return an iterator of individual patches from a stream

    Peeks at the first lines of the stream to decide what kind of
    container it is (hg export bundle, mbox, MIME message, header-led
    text, or a bare patch) and dispatches to the matching splitter.
    Each yielded item is a file-like object holding one patch.
    '''
    def isheader(line, inheader):
        # heuristic: does this line look like an RFC-2822 header line?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap the accumulated lines into a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers (hg export output)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on 'From ' lines, then recurse into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # walk a MIME message and yield the text parts that may be patches
        def msgfp(m):
            # flatten a message part back into a file-like object
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split wherever a run of header lines restarts after other text
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # everything read so far is one single plain patch
        yield chunk(cur)

    class fiter(object):
        # minimal iterator wrapper for objects providing only readline()
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
179
179
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# used by extract() to map '# <Header> <value>' patch lines into the
# corresponding key of the returned data dictionary
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
186
186
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # the extracted patch text is written to a temp file whose name is
    # returned under data['filename']; the caller must unlink it
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading '[PATCH n/m]'-style tag from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        # scan every text-ish MIME part for diff content
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # the text before the diff becomes the commit message,
                # minus any hg patch header lines
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # other recognized '# Header value' lines
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # conventional patch footer separator
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch content found: clean up the now-useless temp file
        os.unlink(tmpname)
    return data
307
307
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # derive the (islink, isexec) pair from a numeric st_mode value
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        clone = patchmeta(self.path)
        clone.oldpath = self.oldpath
        clone.mode = self.mode
        clone.op = self.op
        clone.binary = self.binary
        return clone

    def _ispatchinga(self, afile):
        # '/dev/null' on the 'a' side means the file is being created
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # '/dev/null' on the 'b' side means the file is being removed
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
353
353
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Iterates the line reader 'lr' and returns a list of patchmeta
    objects, one per file touched by a 'diff --git' header.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # start of the hunks: metadata for this file is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # the last six characters are the octal file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
397
397
class linereader(object):
    """File-like wrapper allowing lines to be pushed back onto the input."""

    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # queue a line to be returned before the underlying stream
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # drain pushed-back lines (FIFO) before reading from the stream
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty string (EOF)
        return iter(self.readline, '')
417
417
class abstractbackend(object):
    """Interface that patch application targets must implement.

    Concrete subclasses decide where patched data actually lives
    (filesystem, repository, ...).  All methods except writerej() must
    be overridden; writerej() defaults to doing nothing.
    """

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.  The default implementation discards the rejects.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
451
451
class fsbackend(abstractbackend):
    """Patch backend reading and writing files through a vfs rooted at
    basedir."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlinks: data is the link target, flags mark it as a link
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as err:
            # a missing file simply is not executable
            if err.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is missing/deleted
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only update the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # rejects for FILE are stored next to it as FILE.rej
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
500
500
class workingbackend(fsbackend):
    """Patch backend writing to the working directory of a repository.

    On top of plain filesystem writes it tracks changed/removed/copied
    files so close() can update the dirstate accordingly.
    """

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity  # rename-detection threshold for marktouched
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files that exist on disk but are untracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Record copies/removals in the dirstate; return sorted changed files."""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
544
544
class filestore(object):
    """Map filenames to patched (data, mode, copied) results.

    Contents are held in memory until the maxsize byte budget (default
    4 MiB; negative means unlimited in-memory) is exhausted, after which
    file data spills into a temporary directory removed by close().
    """

    def __init__(self, maxsize=None):
        self.opener = None      # lazily-created vfs over the spill directory
        self.files = {}         # fname -> (tempname, mode, copied) for spilled files
        self.created = 0        # counter used to generate safe temp filenames
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0           # bytes currently held in memory
        self.data = {}          # fname -> (data, mode, copied) for in-memory files

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for fname, or (None, None, None)."""
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
581
581
class repobackend(abstractbackend):
    """Patch backend staging results into a filestore against a changectx.

    Nothing is written to disk; close() reports which files were touched.
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx          # changectx the patch is applied against
        self.store = store      # filestore receiving patched contents
        self.changed = set()
        self.removed = set()
        self.copied = {}        # destination -> copy source

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) or (None, None) if unknown."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: carry over the existing content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed
623
623
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# (raw strings: '\d' in a plain literal is an invalid escape sequence)
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']
628
628
class patchfile(object):
    """Per-file patching state.

    Loads the target file from a backend (or, for copies/renames, from a
    store), then applies hunks one by one with offset and fuzz handling,
    collecting failed hunks in self.rej. close() flushes the result and
    writes a .rej file for any rejects.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None                 # detected EOL of the existing file
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}              # line content -> positions, for fuzzing
        self.dirty = 0
        self.offset = 0             # cumulative line-count drift from applied hunks
        self.skew = 0               # extra drift discovered while searching
        self.rej = []               # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines through the backend, converting EOLs per eolmode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print "patching file ..." once, as a warning or a note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk; return -1 on reject, else the fuzz level used."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
845
845
class header(object):
    """patch header

    Holds the header lines of one file's diff plus its hunks, and
    answers questions about the file (binary? new? special?).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        """Write a human-oriented summary of this header to fp."""
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # binary or deleted files must be taken whole, not hunk by hunk
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        """Return [name] or [fromfile, tofile] parsed from the first line."""
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
            any(self.special_re.match(h) for h in self.header)
917
917
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # limit context to maxcontext lines; reverse=True keeps the
            # tail (leading context), reverse=False keeps the head
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # shift the start lines by any leading context that was trimmed
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith('+')])
        rem = len([h for h in hunk if h.startswith('-')])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        m = {'+': '-', '-': '+', '\\': '\\'}
        hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, hunk, self.after)

    def write(self, fp):
        """Emit this hunk in unified diff format to fp."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
996
996
def getmessages():
    """Return the prompt strings for interactive hunk selection.

    Keyed first by context ('multiple', 'single', 'help'), then by
    operation ('discard', 'record', 'revert'). Built lazily so the
    strings are translated with the active locale.
    """
    return {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        },
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        },
        'help': {
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
            'revert': _('[Ynesfdaq?]'
                        '$$ &Yes, revert this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Revert remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Revert &all changes to all remaining files'
                        '$$ &Quit, reverting no changes'
                        '$$ &? (display help)')
        }
    }
1042
1042
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks.

    Walks ``headers`` (a list of file headers, each carrying its hunks),
    prompting the user per file and per hunk.  Returns a 2-tuple of
    (flat list of kept headers+hunks, {}).  ``operation`` selects the
    prompt wording ('record', 'revert', 'discard'); defaults to 'record'.
    May raise error.Abort if the user quits.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # Sticky answers: a previous "all"/"file" decision short-circuits
        # further prompting for this scope.
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            # duplicate file header (e.g. same file listed twice); keep first
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # e.g. binary or rename-only headers must be taken whole
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    # earlier skipped hunks shifted the target line numbers
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
                if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single unified- or context-diff hunk.

    ``self.a``/``self.b`` hold the old/new sides (with '-'/'+'/' ' markers
    on the a-side, bare lines on the b-side), ``self.hunk`` the raw hunk
    lines including the '@@' descriptor.  Parsing happens eagerly in
    __init__ when a line reader ``lr`` is given.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the '@@ ...' (or context-diff '***') descriptor line
        # num: 1-based hunk number, used in error messages
        # lr: line reader to parse from, or None for a shell object
        # context: truthy when parsing a context diff rather than unified
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from line reader ``lr``."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # lengths are optional in '@@ -s[,l] +s[,l] @@'; default to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk, converting it to unified form."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the b-side line into self.hunk at the right spot
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides hold exactly the announced line counts."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to ``fuzz`` context
        lines trimmed; starts are converted to 0-based offsets."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1409
1409
class binhunk(object):
    'A binary patch file.'

    def __init__(self, lr, fname):
        # text: decoded binary payload, set by _read; None until then
        self.text = None
        # delta: True when the payload is a delta against existing content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload was successfully decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the new file content (a one-element list), applying the
        delta to ``lines`` when this hunk is a delta."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a 'literal'/'delta' git binary hunk body from ``lr``."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes this line's payload length:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1465
1465
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' diff header line.

    Strips the 4-character marker, the trailing newline, and anything
    after the first tab (or, failing that, the first space) — typically
    a timestamp.  Returns the whole remainder when no separator exists.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]
1475
1475
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline
    <BLANKLINE>
    '''

    newhunks = []
    for c in hunks:
        # headers pass through untouched; only objects that know how to
        # reverse themselves (recordhunk-style chunks) are flipped
        if util.safehasattr(c, 'reversehunk'):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks
1538
1538
1539 def parsepatch(originalchunks, maxcontext=None):
1539 def parsepatch(originalchunks, maxcontext=None):
1540 """patch -> [] of headers -> [] of hunks
1540 """patch -> [] of headers -> [] of hunks
1541
1541
1542 If maxcontext is not None, trim context lines if necessary.
1542 If maxcontext is not None, trim context lines if necessary.
1543
1543
1544 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1544 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1545 ... --- a/folder1/g
1545 ... --- a/folder1/g
1546 ... +++ b/folder1/g
1546 ... +++ b/folder1/g
1547 ... @@ -1,8 +1,10 @@
1547 ... @@ -1,8 +1,10 @@
1548 ... 1
1548 ... 1
1549 ... 2
1549 ... 2
1550 ... -3
1550 ... -3
1551 ... 4
1551 ... 4
1552 ... 5
1552 ... 5
1553 ... 6
1553 ... 6
1554 ... +6.1
1554 ... +6.1
1555 ... +6.2
1555 ... +6.2
1556 ... 7
1556 ... 7
1557 ... 8
1557 ... 8
1558 ... +9'''
1558 ... +9'''
1559 >>> out = util.stringio()
1559 >>> out = util.stringio()
1560 >>> headers = parsepatch([rawpatch], maxcontext=1)
1560 >>> headers = parsepatch([rawpatch], maxcontext=1)
1561 >>> for header in headers:
1561 >>> for header in headers:
1562 ... header.write(out)
1562 ... header.write(out)
1563 ... for hunk in header.hunks:
1563 ... for hunk in header.hunks:
1564 ... hunk.write(out)
1564 ... hunk.write(out)
1565 >>> print(pycompat.sysstr(out.getvalue()))
1565 >>> print(pycompat.sysstr(out.getvalue()))
1566 diff --git a/folder1/g b/folder1/g
1566 diff --git a/folder1/g b/folder1/g
1567 --- a/folder1/g
1567 --- a/folder1/g
1568 +++ b/folder1/g
1568 +++ b/folder1/g
1569 @@ -2,3 +2,2 @@
1569 @@ -2,3 +2,2 @@
1570 2
1570 2
1571 -3
1571 -3
1572 4
1572 4
1573 @@ -6,2 +5,4 @@
1573 @@ -6,2 +5,4 @@
1574 6
1574 6
1575 +6.1
1575 +6.1
1576 +6.2
1576 +6.2
1577 7
1577 7
1578 @@ -8,1 +9,2 @@
1578 @@ -8,1 +9,2 @@
1579 8
1579 8
1580 +9
1580 +9
1581 """
1581 """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # line cursors into the original (from) and patched (to) file,
            # advanced as hunks are flushed by addcontext()
            self.fromline = 0
            self.toline = 0
            # section heading captured from the trailing part of an @@ line
            self.proc = ''
            # header object of the file currently being parsed
            self.header = None
            # context lines following the last flushed hunk
            self.context = []
            # context lines immediately preceding the pending hunk
            self.before = []
            # accumulated -/+ lines of the pending hunk
            self.hunk = []
            # all completed header objects, returned by finished()
            self.headers = []

        def addrange(self, limits):
            # record position info from an @@ -a,b +c,d @@ range line;
            # only the start positions and the heading are kept
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush the pending hunk (if any), using 'context' as its
            # trailing context, then remember 'context' for the next hunk;
            # 'maxcontext' is the enclosing parsepatch() argument
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                # advance cursors past the leading context plus what the
                # hunk removed/added on each side
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # start a new hunk; context seen so far becomes its leading
            # context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush any pending hunk and open a new file header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk and return all parsed headers
            self.addcontext([])
            return self.headers

        # (current state, event) -> handler; entries are plain functions
        # invoked explicitly as transitions[state][event](p, data)
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }
1646
1646
1647 p = parser()
1647 p = parser()
1648 fp = stringio()
1648 fp = stringio()
1649 fp.write(''.join(originalchunks))
1649 fp.write(''.join(originalchunks))
1650 fp.seek(0)
1650 fp.seek(0)
1651
1651
1652 state = 'context'
1652 state = 'context'
1653 for newstate, data in scanpatch(fp):
1653 for newstate, data in scanpatch(fp):
1654 try:
1654 try:
1655 p.transitions[state][newstate](p, data)
1655 p.transitions[state][newstate](p, data)
1656 except KeyError:
1656 except KeyError:
1657 raise PatchError('unhandled transition: %s -> %s' %
1657 raise PatchError('unhandled transition: %s -> %s' %
1658 (state, newstate))
1658 (state, newstate))
1659 state = newstate
1659 state = newstate
1660 del fp
1660 del fp
1661 return p.finished()
1661 return p.finished()
1662
1662
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    # fast path: nothing to strip, just prepend the prefix
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # a run of consecutive slashes counts as a single separator
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1700
1700
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) patch file/hunk.

    Decides which repository file the hunk targets, based on which of the
    stripped a/ and b/ paths exist in 'backend', and whether the hunk
    creates or deletes the file.  Raises PatchError when neither side
    names a usable file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a creation/deletion is signalled by /dev/null plus an empty range
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = afile if isbackup else bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the path names alone when existence checks failed
        if not nullb:
            fname = afile if isbackup else bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1755
1755
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    rangere = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    reader = linereader(fp)

    def grab(first, keep):
        """collect lines from reader while keep() holds; push back the
        first non-matching line"""
        collected = [first]
        for ln in iter(reader.readline, ''):
            if not keep(ln):
                reader.push(ln)
                break
            collected.append(ln)
        return collected

    for ln in iter(reader.readline, ''):
        if ln.startswith('diff --git a/') or ln.startswith('diff -r '):
            def notheader(l):
                parts = l.split(None, 1)
                return not parts or parts[0] not in ('---', 'diff')
            hdr = grab(ln, notheader)
            fromfile = reader.readline()
            if fromfile.startswith('---'):
                # include the ---/+++ pair in the file event
                tofile = reader.readline()
                hdr += [fromfile, tofile]
            else:
                reader.push(fromfile)
            yield 'file', hdr
        elif ln[0:1] == ' ':
            yield 'context', grab(ln, lambda l: l[0] in ' \\')
        elif ln[0] in '-+':
            yield 'hunk', grab(ln, lambda l: l[0] in '-+\\')
        else:
            m = rangere.match(ln)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', ln
1801
1801
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input: slurp it all into a rewindable buffer
        fp = stringio(lr.fp.read())
    metareader = linereader(fp)
    metareader.push(firstline)
    gitpatches = readgitpatch(metareader)
    # rewind so the caller can re-read the patch body
    fp.seek(pos)
    return gitpatches
1827
1827
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # list of pending gitpatch records (reversed, popped from the end),
    # or None until the first 'diff --git' line is seen
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff flavor unknown yet; False: unified; True: context diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk starts for the currently selected file
            gp = None
            if (gitpatches and
                    gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of this file: announce the file itself
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit hunkless file events for metadata-only entries that
            # precede the current file in the scanned list
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # defer the 'file' event until its first hunk is parsed
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush remaining metadata-only git entries (no hunks followed them)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1923
1923
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(chunk):
        # length in bytes of the leading size varint (each byte's high
        # bit marks continuation)
        n = 0
        for c in chunk:
            n += 1
            if not (ord(c) & 0x80):
                return n
        return n

    out = ""
    # skip the source-size and target-size headers
    binchunk = binchunk[deltahead(binchunk):]
    binchunk = binchunk[deltahead(binchunk):]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy-from-source opcode: the low seven bits of cmd say which
            # little-endian offset/size bytes follow
            offset = 0
            size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if (cmd & bit):
                    offset |= ord(binchunk[i]) << shift
                    i += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if (cmd & bit):
                    size |= ord(binchunk[i]) << shift
                    i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            out += data[offset:offset + size]
        elif cmd != 0:
            # literal insert of the next cmd bytes from the delta itself
            out += binchunk[i:i + cmd]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1979
1979
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and try to apply it through 'patchfile' objects.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With eolmode 'strict', the patch content and patched files are read
    in binary mode; otherwise line endings are ignored while patching and
    normalized afterwards according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1992
1992
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Core apply loop: consume 'file'/'hunk'/'git' events from iterhunks(fp),
    # apply hunks via 'patcher' objects against 'backend', staging
    # copy/rename sources in 'store'.  Returns 0 on clean apply, 1 on
    # fuzz, -1 if any rejects were recorded.

    if prefix:
        # canonicalize the prefix against the repo and ensure a trailing
        # '/' so it can be prepended to patched paths directly
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip strip-1 components (iterhunks already removed a/ or b/)
        # and prepend the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file was skipped (e.g. its patcher failed to open)
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # close out the previous file before switching
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: derive the metadata ourselves
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: create/delete/copy without hunks
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the failure but keep applying the rest
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stage copy/rename sources before any of them get modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2082
2082
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Scrapes the external patch program's output for 'patching file',
    'with fuzz', rejects and failures; touched paths are added to
    'files'.  Raises PatchError when the command exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # initialize before the loop: if the patcher emits 'with fuzz' or
        # 'FAILED' before any 'patching file' line, the branches below
        # would otherwise hit an UnboundLocalError
        pf = ''
        printed_file = False
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2124
2124
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' through 'backend', collecting touched paths in 'files'.

    'patchobj' may be a file name or an open file-like object.  Returns
    True when the patch applied with fuzz, False on a clean apply, and
    raises PatchError when hunks were rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be a file name...
        patchfp = open(patchobj, 'rb')
    except TypeError:
        # ...or an already-open file object
        patchfp = patchobj
    try:
        ret = applydiff(ui, patchfp, backend, store, strip=strip,
                        prefix=prefix, eolmode=eolmode)
    finally:
        if patchfp != patchobj:
            patchfp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2151
2151
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2158
2158
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of ctx, writing results through a repobackend."""
    rbackend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rbackend, patchobj, strip, prefix, files, eolmode)
2163
2163
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    externalpatcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    # a configured external patch tool takes precedence over the
    # builtin implementation
    if externalpatcher:
        return _externalpatch(ui, repo, externalpatcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2185
2185
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at patchpath.

    Rename sources are included alongside their destinations.
    """
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as patchfile:
        touched = set()
        for state, values in iterhunks(patchfile):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patches carry their own metadata; just strip
                    # the leading path components
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath,
                                                   strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                       strip, '')
                touched.add(gp.path)
                if gp.op == 'RENAME':
                    touched.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return touched
2206
2206
class GitDiffRequired(Exception):
    """Raised when a diff cannot be expressed without the git format."""
2209
2209
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with every feature enabled and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# historical alias
diffopts = diffallopts
2216
2216
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness
      issues with most diff parsers
    '''
    def fetch(key, name=None, getter=ui.configbool, forceplain=None):
        # diffopts flags are either None-default (passed through
        # unchanged so unset values stay identifiable) or some other
        # falsey default (eg --unified defaults to an empty string).
        # Command-line values override hgrc only when they appear
        # explicitly set: any truthy value, or a real boolean.
        if opts:
            v = opts.get(key)
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    built = {
        'nodates': fetch('nodates'),
        'showfunc': fetch('show_function', 'showfunc'),
        'context': fetch('unified', getter=ui.config),
    }

    if git:
        built['git'] = fetch('git')

        # lives in the experimental section, so query ui.configbool
        # directly instead of going through fetch()
        built['showsimilarity'] = ui.configbool('experimental',
                                                'extendedheader.similarity')

        # inspect the raw config value rather than using fetch() since
        # it may be an integer (hash length) or a word (short/full/none)
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # keyword forms of the setting
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                built['index'] = hlen

    if whitespace:
        built['ignorews'] = fetch('ignore_all_space', 'ignorews')
        built['ignorewsamount'] = fetch('ignore_space_change',
                                        'ignorewsamount')
        built['ignoreblanklines'] = fetch('ignore_blank_lines',
                                          'ignoreblanklines')
        built['ignorewseol'] = fetch('ignore_space_at_eol', 'ignorewseol')

    if formatchanging:
        built['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        built['nobinary'] = (not binary if binary is not None
                             else fetch('nobinary', forceplain=False))
        built['noprefix'] = fetch('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(built))
2297
2297
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    for fctx1, fctx2, hdr, hunks in diffhunks(
        repo, node1=node1, node2=node2,
        match=match, changes=changes, opts=opts,
        losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        # flatten the (hunkrange, hunklines) pairs into plain text
        text = ''.join(l for hrange, hlines in hunks for l in hlines)
        # a header alone is only worth emitting when it carries more
        # than the bare "diff ..." line or is followed by hunk text
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2331
2332
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files as (fctx1, fctx2, header, hunks) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """
    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # tiny LRU cache of filelogs, keyed by file name, so repeated
        # lookups of the same file avoid re-opening its filelog
        flogcache = {}
        lru = collections.deque()
        def getfilectx(path, ctx):
            filectx = ctx.filectx(path, filelog=flogcache.get(path))
            if path not in flogcache:
                if len(flogcache) > 20:
                    del flogcache[lru.popleft()]
                flogcache[path] = filectx.filelog()
            else:
                lru.remove(path)
            lru.append(path)
            return filectx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, build a fresh matcher covering only relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    hexfunc = hex if repo.ui.debugflag else short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case
            # we need to 'and' all the patterns from the matcher with relroot.
            def filterrel(names):
                return [f for f in names if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # drop copies where either endpoint lies outside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # merged-in additions appear as modifications during merges;
            # reclassify them as additions
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # merged-in additions that are then removed are reported as
            # removed; they are not in ctx1, so keep them out of the diff
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # files merged in during a merge and then copied/renamed are
            # reported as copies; show them as plain additions instead
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # buffer the whole output until we are sure it can be
            # generated without switching to the git format
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2444
2445
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i:
                yield ('\n', '')
            # track whether we are inside a file header block: a header
            # starts at any line not opening with diff-body characters
            # and ends at the first hunk ('@') line
            if inheader:
                if line.startswith('@'):
                    inheader = False
            elif line and line[0] not in ' +-@\\':
                inheader = True
            stripline = line
            diffline = False
            if not inheader and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = headprefixes if inheader else textprefixes
            for prefix, label in prefixes:
                if not stripline.startswith(prefix):
                    continue
                if diffline:
                    for token in tabsplitter.findall(stripline):
                        if token[0] == '\t':
                            yield (token, 'diff.tab')
                        else:
                            yield (token, label)
                else:
                    yield (stripline, label)
                break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2498
2499
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # delegate to difflabel, which colorizes the raw diff() output
    return difflabel(diff, *args, **kw)
2502
2503
2503 def _filepairs(modified, added, removed, copy, opts):
2504 def _filepairs(modified, added, removed, copy, opts):
2504 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2505 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2505 before and f2 is the the name after. For added files, f1 will be None,
2506 before and f2 is the the name after. For added files, f1 will be None,
2506 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2507 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2507 or 'rename' (the latter two only if opts.git is set).'''
2508 or 'rename' (the latter two only if opts.git is set).'''
2508 gone = set()
2509 gone = set()
2509
2510
2510 copyto = dict([(v, k) for k, v in copy.items()])
2511 copyto = dict([(v, k) for k, v in copy.items()])
2511
2512
2512 addedset, removedset = set(added), set(removed)
2513 addedset, removedset = set(added), set(removed)
2513
2514
2514 for f in sorted(modified + added + removed):
2515 for f in sorted(modified + added + removed):
2515 copyop = None
2516 copyop = None
2516 f1, f2 = f, f
2517 f1, f2 = f, f
2517 if f in addedset:
2518 if f in addedset:
2518 f1 = None
2519 f1 = None
2519 if f in copy:
2520 if f in copy:
2520 if opts.git:
2521 if opts.git:
2521 f1 = copy[f]
2522 f1 = copy[f]
2522 if f1 in removedset and f1 not in gone:
2523 if f1 in removedset and f1 not in gone:
2523 copyop = 'rename'
2524 copyop = 'rename'
2524 gone.add(f1)
2525 gone.add(f1)
2525 else:
2526 else:
2526 copyop = 'copy'
2527 copyop = 'copy'
2527 elif f in removedset:
2528 elif f in removedset:
2528 f2 = None
2529 f2 = None
2529 if opts.git:
2530 if opts.git:
2530 # have we already reported a copy above?
2531 # have we already reported a copy above?
2531 if (f in copyto and copyto[f] in addedset
2532 if (f in copyto and copyto[f] in addedset
2532 and copy[copyto[f]] == f):
2533 and copy[copyto[f]] == f):
2533 continue
2534 continue
2534 yield f1, f2, copyop
2535 yield f1, f2, copyop
2535
2536
2536 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2537 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2537 copy, getfilectx, opts, losedatafn, prefix, relroot):
2538 copy, getfilectx, opts, losedatafn, prefix, relroot):
2538 '''given input data, generate a diff and yield it in blocks
2539 '''given input data, generate a diff and yield it in blocks
2539
2540
2540 If generating a diff would lose data like flags or binary data and
2541 If generating a diff would lose data like flags or binary data and
2541 losedatafn is not None, it will be called.
2542 losedatafn is not None, it will be called.
2542
2543
2543 relroot is removed and prefix is added to every path in the diff output.
2544 relroot is removed and prefix is added to every path in the diff output.
2544
2545
2545 If relroot is not empty, this function expects every path in modified,
2546 If relroot is not empty, this function expects every path in modified,
2546 added, removed and copy to start with it.'''
2547 added, removed and copy to start with it.'''
2547
2548
2548 def gitindex(text):
2549 def gitindex(text):
2549 if not text:
2550 if not text:
2550 text = ""
2551 text = ""
2551 l = len(text)
2552 l = len(text)
2552 s = hashlib.sha1('blob %d\0' % l)
2553 s = hashlib.sha1('blob %d\0' % l)
2553 s.update(text)
2554 s.update(text)
2554 return s.hexdigest()
2555 return s.hexdigest()
2555
2556
2556 if opts.noprefix:
2557 if opts.noprefix:
2557 aprefix = bprefix = ''
2558 aprefix = bprefix = ''
2558 else:
2559 else:
2559 aprefix = 'a/'
2560 aprefix = 'a/'
2560 bprefix = 'b/'
2561 bprefix = 'b/'
2561
2562
2562 def diffline(f, revs):
2563 def diffline(f, revs):
2563 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2564 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2564 return 'diff %s %s' % (revinfo, f)
2565 return 'diff %s %s' % (revinfo, f)
2565
2566
2566 def isempty(fctx):
2567 def isempty(fctx):
2567 return fctx is None or fctx.size() == 0
2568 return fctx is None or fctx.size() == 0
2568
2569
2569 date1 = util.datestr(ctx1.date())
2570 date1 = util.datestr(ctx1.date())
2570 date2 = util.datestr(ctx2.date())
2571 date2 = util.datestr(ctx2.date())
2571
2572
2572 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2573 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2573
2574
2574 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2575 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2575 or repo.ui.configbool('devel', 'check-relroot')):
2576 or repo.ui.configbool('devel', 'check-relroot')):
2576 for f in modified + added + removed + list(copy) + list(copy.values()):
2577 for f in modified + added + removed + list(copy) + list(copy.values()):
2577 if f is not None and not f.startswith(relroot):
2578 if f is not None and not f.startswith(relroot):
2578 raise AssertionError(
2579 raise AssertionError(
2579 "file %s doesn't start with relroot %s" % (f, relroot))
2580 "file %s doesn't start with relroot %s" % (f, relroot))
2580
2581
2581 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2582 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2582 content1 = None
2583 content1 = None
2583 content2 = None
2584 content2 = None
2584 fctx1 = None
2585 fctx1 = None
2585 fctx2 = None
2586 fctx2 = None
2586 flag1 = None
2587 flag1 = None
2587 flag2 = None
2588 flag2 = None
2588 if f1:
2589 if f1:
2589 fctx1 = getfilectx(f1, ctx1)
2590 fctx1 = getfilectx(f1, ctx1)
2590 if opts.git or losedatafn:
2591 if opts.git or losedatafn:
2591 flag1 = ctx1.flags(f1)
2592 flag1 = ctx1.flags(f1)
2592 if f2:
2593 if f2:
2593 fctx2 = getfilectx(f2, ctx2)
2594 fctx2 = getfilectx(f2, ctx2)
2594 if opts.git or losedatafn:
2595 if opts.git or losedatafn:
2595 flag2 = ctx2.flags(f2)
2596 flag2 = ctx2.flags(f2)
2596 # if binary is True, output "summary" or "base85", but not "text diff"
2597 # if binary is True, output "summary" or "base85", but not "text diff"
2597 binary = not opts.text and any(f.isbinary()
2598 binary = not opts.text and any(f.isbinary()
2598 for f in [fctx1, fctx2] if f is not None)
2599 for f in [fctx1, fctx2] if f is not None)
2599
2600
2600 if losedatafn and not opts.git:
2601 if losedatafn and not opts.git:
2601 if (binary or
2602 if (binary or
2602 # copy/rename
2603 # copy/rename
2603 f2 in copy or
2604 f2 in copy or
2604 # empty file creation
2605 # empty file creation
2605 (not f1 and isempty(fctx2)) or
2606 (not f1 and isempty(fctx2)) or
2606 # empty file deletion
2607 # empty file deletion
2607 (isempty(fctx1) and not f2) or
2608 (isempty(fctx1) and not f2) or
2608 # create with flags
2609 # create with flags
2609 (not f1 and flag2) or
2610 (not f1 and flag2) or
2610 # change flags
2611 # change flags
2611 (f1 and f2 and flag1 != flag2)):
2612 (f1 and f2 and flag1 != flag2)):
2612 losedatafn(f2 or f1)
2613 losedatafn(f2 or f1)
2613
2614
2614 path1 = f1 or f2
2615 path1 = f1 or f2
2615 path2 = f2 or f1
2616 path2 = f2 or f1
2616 path1 = posixpath.join(prefix, path1[len(relroot):])
2617 path1 = posixpath.join(prefix, path1[len(relroot):])
2617 path2 = posixpath.join(prefix, path2[len(relroot):])
2618 path2 = posixpath.join(prefix, path2[len(relroot):])
2618 header = []
2619 header = []
2619 if opts.git:
2620 if opts.git:
2620 header.append('diff --git %s%s %s%s' %
2621 header.append('diff --git %s%s %s%s' %
2621 (aprefix, path1, bprefix, path2))
2622 (aprefix, path1, bprefix, path2))
2622 if not f1: # added
2623 if not f1: # added
2623 header.append('new file mode %s' % gitmode[flag2])
2624 header.append('new file mode %s' % gitmode[flag2])
2624 elif not f2: # removed
2625 elif not f2: # removed
2625 header.append('deleted file mode %s' % gitmode[flag1])
2626 header.append('deleted file mode %s' % gitmode[flag1])
2626 else: # modified/copied/renamed
2627 else: # modified/copied/renamed
2627 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2628 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2628 if mode1 != mode2:
2629 if mode1 != mode2:
2629 header.append('old mode %s' % mode1)
2630 header.append('old mode %s' % mode1)
2630 header.append('new mode %s' % mode2)
2631 header.append('new mode %s' % mode2)
2631 if copyop is not None:
2632 if copyop is not None:
2632 if opts.showsimilarity:
2633 if opts.showsimilarity:
2633 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2634 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2634 header.append('similarity index %d%%' % sim)
2635 header.append('similarity index %d%%' % sim)
2635 header.append('%s from %s' % (copyop, path1))
2636 header.append('%s from %s' % (copyop, path1))
2636 header.append('%s to %s' % (copyop, path2))
2637 header.append('%s to %s' % (copyop, path2))
2637 elif revs and not repo.ui.quiet:
2638 elif revs and not repo.ui.quiet:
2638 header.append(diffline(path1, revs))
2639 header.append(diffline(path1, revs))
2639
2640
2640 # fctx.is | diffopts | what to | is fctx.data()
2641 # fctx.is | diffopts | what to | is fctx.data()
2641 # binary() | text nobinary git index | output? | outputted?
2642 # binary() | text nobinary git index | output? | outputted?
2642 # ------------------------------------|----------------------------
2643 # ------------------------------------|----------------------------
2643 # yes | no no no * | summary | no
2644 # yes | no no no * | summary | no
2644 # yes | no no yes * | base85 | yes
2645 # yes | no no yes * | base85 | yes
2645 # yes | no yes no * | summary | no
2646 # yes | no yes no * | summary | no
2646 # yes | no yes yes 0 | summary | no
2647 # yes | no yes yes 0 | summary | no
2647 # yes | no yes yes >0 | summary | semi [1]
2648 # yes | no yes yes >0 | summary | semi [1]
2648 # yes | yes * * * | text diff | yes
2649 # yes | yes * * * | text diff | yes
2649 # no | * * * * | text diff | yes
2650 # no | * * * * | text diff | yes
2650 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2651 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2651 if binary and (not opts.git or (opts.git and opts.nobinary and not
2652 if binary and (not opts.git or (opts.git and opts.nobinary and not
2652 opts.index)):
2653 opts.index)):
2653 # fast path: no binary content will be displayed, content1 and
2654 # fast path: no binary content will be displayed, content1 and
2654 # content2 are only used for equivalent test. cmp() could have a
2655 # content2 are only used for equivalent test. cmp() could have a
2655 # fast path.
2656 # fast path.
2656 if fctx1 is not None:
2657 if fctx1 is not None:
2657 content1 = b'\0'
2658 content1 = b'\0'
2658 if fctx2 is not None:
2659 if fctx2 is not None:
2659 if fctx1 is not None and not fctx1.cmp(fctx2):
2660 if fctx1 is not None and not fctx1.cmp(fctx2):
2660 content2 = b'\0' # not different
2661 content2 = b'\0' # not different
2661 else:
2662 else:
2662 content2 = b'\0\0'
2663 content2 = b'\0\0'
2663 else:
2664 else:
2664 # normal path: load contents
2665 # normal path: load contents
2665 if fctx1 is not None:
2666 if fctx1 is not None:
2666 content1 = fctx1.data()
2667 content1 = fctx1.data()
2667 if fctx2 is not None:
2668 if fctx2 is not None:
2668 content2 = fctx2.data()
2669 content2 = fctx2.data()
2669
2670
2670 if binary and opts.git and not opts.nobinary:
2671 if binary and opts.git and not opts.nobinary:
2671 text = mdiff.b85diff(content1, content2)
2672 text = mdiff.b85diff(content1, content2)
2672 if text:
2673 if text:
2673 header.append('index %s..%s' %
2674 header.append('index %s..%s' %
2674 (gitindex(content1), gitindex(content2)))
2675 (gitindex(content1), gitindex(content2)))
2675 hunks = (None, [text]),
2676 hunks = (None, [text]),
2676 else:
2677 else:
2677 if opts.git and opts.index > 0:
2678 if opts.git and opts.index > 0:
2678 flag = flag1
2679 flag = flag1
2679 if flag is None:
2680 if flag is None:
2680 flag = flag2
2681 flag = flag2
2681 header.append('index %s..%s %s' %
2682 header.append('index %s..%s %s' %
2682 (gitindex(content1)[0:opts.index],
2683 (gitindex(content1)[0:opts.index],
2683 gitindex(content2)[0:opts.index],
2684 gitindex(content2)[0:opts.index],
2684 gitmode[flag]))
2685 gitmode[flag]))
2685
2686
2686 uheaders, hunks = mdiff.unidiff(content1, date1,
2687 uheaders, hunks = mdiff.unidiff(content1, date1,
2687 content2, date2,
2688 content2, date2,
2688 path1, path2, opts=opts)
2689 path1, path2, opts=opts)
2689 header.extend(uheaders)
2690 header.extend(uheaders)
2690 yield header, hunks
2691 yield fctx1, fctx2, header, hunks
2691
2692
2692 def diffstatsum(stats):
2693 def diffstatsum(stats):
2693 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2694 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2694 for f, a, r, b in stats:
2695 for f, a, r, b in stats:
2695 maxfile = max(maxfile, encoding.colwidth(f))
2696 maxfile = max(maxfile, encoding.colwidth(f))
2696 maxtotal = max(maxtotal, a + r)
2697 maxtotal = max(maxtotal, a + r)
2697 addtotal += a
2698 addtotal += a
2698 removetotal += r
2699 removetotal += r
2699 binary = binary or b
2700 binary = binary or b
2700
2701
2701 return maxfile, maxtotal, addtotal, removetotal, binary
2702 return maxfile, maxtotal, addtotal, removetotal, binary
2702
2703
2703 def diffstatdata(lines):
2704 def diffstatdata(lines):
2704 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2705 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2705
2706
2706 results = []
2707 results = []
2707 filename, adds, removes, isbinary = None, 0, 0, False
2708 filename, adds, removes, isbinary = None, 0, 0, False
2708
2709
2709 def addresult():
2710 def addresult():
2710 if filename:
2711 if filename:
2711 results.append((filename, adds, removes, isbinary))
2712 results.append((filename, adds, removes, isbinary))
2712
2713
2713 # inheader is used to track if a line is in the
2714 # inheader is used to track if a line is in the
2714 # header portion of the diff. This helps properly account
2715 # header portion of the diff. This helps properly account
2715 # for lines that start with '--' or '++'
2716 # for lines that start with '--' or '++'
2716 inheader = False
2717 inheader = False
2717
2718
2718 for line in lines:
2719 for line in lines:
2719 if line.startswith('diff'):
2720 if line.startswith('diff'):
2720 addresult()
2721 addresult()
2721 # starting a new file diff
2722 # starting a new file diff
2722 # set numbers to 0 and reset inheader
2723 # set numbers to 0 and reset inheader
2723 inheader = True
2724 inheader = True
2724 adds, removes, isbinary = 0, 0, False
2725 adds, removes, isbinary = 0, 0, False
2725 if line.startswith('diff --git a/'):
2726 if line.startswith('diff --git a/'):
2726 filename = gitre.search(line).group(2)
2727 filename = gitre.search(line).group(2)
2727 elif line.startswith('diff -r'):
2728 elif line.startswith('diff -r'):
2728 # format: "diff -r ... -r ... filename"
2729 # format: "diff -r ... -r ... filename"
2729 filename = diffre.search(line).group(1)
2730 filename = diffre.search(line).group(1)
2730 elif line.startswith('@@'):
2731 elif line.startswith('@@'):
2731 inheader = False
2732 inheader = False
2732 elif line.startswith('+') and not inheader:
2733 elif line.startswith('+') and not inheader:
2733 adds += 1
2734 adds += 1
2734 elif line.startswith('-') and not inheader:
2735 elif line.startswith('-') and not inheader:
2735 removes += 1
2736 removes += 1
2736 elif (line.startswith('GIT binary patch') or
2737 elif (line.startswith('GIT binary patch') or
2737 line.startswith('Binary file')):
2738 line.startswith('Binary file')):
2738 isbinary = True
2739 isbinary = True
2739 addresult()
2740 addresult()
2740 return results
2741 return results
2741
2742
2742 def diffstat(lines, width=80):
2743 def diffstat(lines, width=80):
2743 output = []
2744 output = []
2744 stats = diffstatdata(lines)
2745 stats = diffstatdata(lines)
2745 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2746 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2746
2747
2747 countwidth = len(str(maxtotal))
2748 countwidth = len(str(maxtotal))
2748 if hasbinary and countwidth < 3:
2749 if hasbinary and countwidth < 3:
2749 countwidth = 3
2750 countwidth = 3
2750 graphwidth = width - countwidth - maxname - 6
2751 graphwidth = width - countwidth - maxname - 6
2751 if graphwidth < 10:
2752 if graphwidth < 10:
2752 graphwidth = 10
2753 graphwidth = 10
2753
2754
2754 def scale(i):
2755 def scale(i):
2755 if maxtotal <= graphwidth:
2756 if maxtotal <= graphwidth:
2756 return i
2757 return i
2757 # If diffstat runs out of room it doesn't print anything,
2758 # If diffstat runs out of room it doesn't print anything,
2758 # which isn't very useful, so always print at least one + or -
2759 # which isn't very useful, so always print at least one + or -
2759 # if there were at least some changes.
2760 # if there were at least some changes.
2760 return max(i * graphwidth // maxtotal, int(bool(i)))
2761 return max(i * graphwidth // maxtotal, int(bool(i)))
2761
2762
2762 for filename, adds, removes, isbinary in stats:
2763 for filename, adds, removes, isbinary in stats:
2763 if isbinary:
2764 if isbinary:
2764 count = 'Bin'
2765 count = 'Bin'
2765 else:
2766 else:
2766 count = '%d' % (adds + removes)
2767 count = '%d' % (adds + removes)
2767 pluses = '+' * scale(adds)
2768 pluses = '+' * scale(adds)
2768 minuses = '-' * scale(removes)
2769 minuses = '-' * scale(removes)
2769 output.append(' %s%s | %*s %s%s\n' %
2770 output.append(' %s%s | %*s %s%s\n' %
2770 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2771 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2771 countwidth, count, pluses, minuses))
2772 countwidth, count, pluses, minuses))
2772
2773
2773 if stats:
2774 if stats:
2774 output.append(_(' %d files changed, %d insertions(+), '
2775 output.append(_(' %d files changed, %d insertions(+), '
2775 '%d deletions(-)\n')
2776 '%d deletions(-)\n')
2776 % (len(stats), totaladds, totalremoves))
2777 % (len(stats), totaladds, totalremoves))
2777
2778
2778 return ''.join(output)
2779 return ''.join(output)
2779
2780
2780 def diffstatui(*args, **kw):
2781 def diffstatui(*args, **kw):
2781 '''like diffstat(), but yields 2-tuples of (output, label) for
2782 '''like diffstat(), but yields 2-tuples of (output, label) for
2782 ui.write()
2783 ui.write()
2783 '''
2784 '''
2784
2785
2785 for line in diffstat(*args, **kw).splitlines():
2786 for line in diffstat(*args, **kw).splitlines():
2786 if line and line[-1] in '+-':
2787 if line and line[-1] in '+-':
2787 name, graph = line.rsplit(' ', 1)
2788 name, graph = line.rsplit(' ', 1)
2788 yield (name + ' ', '')
2789 yield (name + ' ', '')
2789 m = re.search(br'\++', graph)
2790 m = re.search(br'\++', graph)
2790 if m:
2791 if m:
2791 yield (m.group(0), 'diffstat.inserted')
2792 yield (m.group(0), 'diffstat.inserted')
2792 m = re.search(br'-+', graph)
2793 m = re.search(br'-+', graph)
2793 if m:
2794 if m:
2794 yield (m.group(0), 'diffstat.deleted')
2795 yield (m.group(0), 'diffstat.deleted')
2795 else:
2796 else:
2796 yield (line, '')
2797 yield (line, '')
2797 yield ('\n', '')
2798 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now