##// END OF EJS Templates
patch: accept second matcher that applies only to copy sources (API)...
Martin von Zweigbergk -
r41769:74f53d3b default
parent child Browse files
Show More
@@ -1,2485 +1,2486 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirid,
24 wdirid,
25 )
25 )
26 from . import (
26 from . import (
27 dagop,
27 dagop,
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 obsolete as obsmod,
32 obsolete as obsmod,
33 patch,
33 patch,
34 pathutil,
34 pathutil,
35 phases,
35 phases,
36 pycompat,
36 pycompat,
37 repoview,
37 repoview,
38 scmutil,
38 scmutil,
39 sparse,
39 sparse,
40 subrepo,
40 subrepo,
41 subrepoutil,
41 subrepoutil,
42 util,
42 util,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 dateutil,
45 dateutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 propertycache = util.propertycache
49 propertycache = util.propertycache
50
50
51 class basectx(object):
51 class basectx(object):
52 """A basectx object represents the common logic for its children:
52 """A basectx object represents the common logic for its children:
53 changectx: read-only context that is already present in the repo,
53 changectx: read-only context that is already present in the repo,
54 workingctx: a context that represents the working directory and can
54 workingctx: a context that represents the working directory and can
55 be committed,
55 be committed,
56 memctx: a context that represents changes in-memory and can also
56 memctx: a context that represents changes in-memory and can also
57 be committed."""
57 be committed."""
58
58
59 def __init__(self, repo):
59 def __init__(self, repo):
60 self._repo = repo
60 self._repo = repo
61
61
62 def __bytes__(self):
62 def __bytes__(self):
63 return short(self.node())
63 return short(self.node())
64
64
65 __str__ = encoding.strmethod(__bytes__)
65 __str__ = encoding.strmethod(__bytes__)
66
66
67 def __repr__(self):
67 def __repr__(self):
68 return r"<%s %s>" % (type(self).__name__, str(self))
68 return r"<%s %s>" % (type(self).__name__, str(self))
69
69
70 def __eq__(self, other):
70 def __eq__(self, other):
71 try:
71 try:
72 return type(self) == type(other) and self._rev == other._rev
72 return type(self) == type(other) and self._rev == other._rev
73 except AttributeError:
73 except AttributeError:
74 return False
74 return False
75
75
76 def __ne__(self, other):
76 def __ne__(self, other):
77 return not (self == other)
77 return not (self == other)
78
78
79 def __contains__(self, key):
79 def __contains__(self, key):
80 return key in self._manifest
80 return key in self._manifest
81
81
82 def __getitem__(self, key):
82 def __getitem__(self, key):
83 return self.filectx(key)
83 return self.filectx(key)
84
84
85 def __iter__(self):
85 def __iter__(self):
86 return iter(self._manifest)
86 return iter(self._manifest)
87
87
88 def _buildstatusmanifest(self, status):
88 def _buildstatusmanifest(self, status):
89 """Builds a manifest that includes the given status results, if this is
89 """Builds a manifest that includes the given status results, if this is
90 a working copy context. For non-working copy contexts, it just returns
90 a working copy context. For non-working copy contexts, it just returns
91 the normal manifest."""
91 the normal manifest."""
92 return self.manifest()
92 return self.manifest()
93
93
94 def _matchstatus(self, other, match):
94 def _matchstatus(self, other, match):
95 """This internal method provides a way for child objects to override the
95 """This internal method provides a way for child objects to override the
96 match operator.
96 match operator.
97 """
97 """
98 return match
98 return match
99
99
100 def _buildstatus(self, other, s, match, listignored, listclean,
100 def _buildstatus(self, other, s, match, listignored, listclean,
101 listunknown):
101 listunknown):
102 """build a status with respect to another context"""
102 """build a status with respect to another context"""
103 # Load earliest manifest first for caching reasons. More specifically,
103 # Load earliest manifest first for caching reasons. More specifically,
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 # 1000 and cache it so that when you read 1001, we just need to apply a
106 # 1000 and cache it so that when you read 1001, we just need to apply a
107 # delta to what's in the cache. So that's one full reconstruction + one
107 # delta to what's in the cache. So that's one full reconstruction + one
108 # delta application.
108 # delta application.
109 mf2 = None
109 mf2 = None
110 if self.rev() is not None and self.rev() < other.rev():
110 if self.rev() is not None and self.rev() < other.rev():
111 mf2 = self._buildstatusmanifest(s)
111 mf2 = self._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
113 if mf2 is None:
113 if mf2 is None:
114 mf2 = self._buildstatusmanifest(s)
114 mf2 = self._buildstatusmanifest(s)
115
115
116 modified, added = [], []
116 modified, added = [], []
117 removed = []
117 removed = []
118 clean = []
118 clean = []
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 deletedset = set(deleted)
120 deletedset = set(deleted)
121 d = mf1.diff(mf2, match=match, clean=listclean)
121 d = mf1.diff(mf2, match=match, clean=listclean)
122 for fn, value in d.iteritems():
122 for fn, value in d.iteritems():
123 if fn in deletedset:
123 if fn in deletedset:
124 continue
124 continue
125 if value is None:
125 if value is None:
126 clean.append(fn)
126 clean.append(fn)
127 continue
127 continue
128 (node1, flag1), (node2, flag2) = value
128 (node1, flag1), (node2, flag2) = value
129 if node1 is None:
129 if node1 is None:
130 added.append(fn)
130 added.append(fn)
131 elif node2 is None:
131 elif node2 is None:
132 removed.append(fn)
132 removed.append(fn)
133 elif flag1 != flag2:
133 elif flag1 != flag2:
134 modified.append(fn)
134 modified.append(fn)
135 elif node2 not in wdirfilenodeids:
135 elif node2 not in wdirfilenodeids:
136 # When comparing files between two commits, we save time by
136 # When comparing files between two commits, we save time by
137 # not comparing the file contents when the nodeids differ.
137 # not comparing the file contents when the nodeids differ.
138 # Note that this means we incorrectly report a reverted change
138 # Note that this means we incorrectly report a reverted change
139 # to a file as a modification.
139 # to a file as a modification.
140 modified.append(fn)
140 modified.append(fn)
141 elif self[fn].cmp(other[fn]):
141 elif self[fn].cmp(other[fn]):
142 modified.append(fn)
142 modified.append(fn)
143 else:
143 else:
144 clean.append(fn)
144 clean.append(fn)
145
145
146 if removed:
146 if removed:
147 # need to filter files if they are already reported as removed
147 # need to filter files if they are already reported as removed
148 unknown = [fn for fn in unknown if fn not in mf1 and
148 unknown = [fn for fn in unknown if fn not in mf1 and
149 (not match or match(fn))]
149 (not match or match(fn))]
150 ignored = [fn for fn in ignored if fn not in mf1 and
150 ignored = [fn for fn in ignored if fn not in mf1 and
151 (not match or match(fn))]
151 (not match or match(fn))]
152 # if they're deleted, don't report them as removed
152 # if they're deleted, don't report them as removed
153 removed = [fn for fn in removed if fn not in deletedset]
153 removed = [fn for fn in removed if fn not in deletedset]
154
154
155 return scmutil.status(modified, added, removed, deleted, unknown,
155 return scmutil.status(modified, added, removed, deleted, unknown,
156 ignored, clean)
156 ignored, clean)
157
157
158 @propertycache
158 @propertycache
159 def substate(self):
159 def substate(self):
160 return subrepoutil.state(self, self._repo.ui)
160 return subrepoutil.state(self, self._repo.ui)
161
161
162 def subrev(self, subpath):
162 def subrev(self, subpath):
163 return self.substate[subpath][1]
163 return self.substate[subpath][1]
164
164
165 def rev(self):
165 def rev(self):
166 return self._rev
166 return self._rev
167 def node(self):
167 def node(self):
168 return self._node
168 return self._node
169 def hex(self):
169 def hex(self):
170 return hex(self.node())
170 return hex(self.node())
171 def manifest(self):
171 def manifest(self):
172 return self._manifest
172 return self._manifest
173 def manifestctx(self):
173 def manifestctx(self):
174 return self._manifestctx
174 return self._manifestctx
175 def repo(self):
175 def repo(self):
176 return self._repo
176 return self._repo
177 def phasestr(self):
177 def phasestr(self):
178 return phases.phasenames[self.phase()]
178 return phases.phasenames[self.phase()]
179 def mutable(self):
179 def mutable(self):
180 return self.phase() > phases.public
180 return self.phase() > phases.public
181
181
182 def matchfileset(self, expr, badfn=None):
182 def matchfileset(self, expr, badfn=None):
183 return fileset.match(self, expr, badfn=badfn)
183 return fileset.match(self, expr, badfn=badfn)
184
184
185 def obsolete(self):
185 def obsolete(self):
186 """True if the changeset is obsolete"""
186 """True if the changeset is obsolete"""
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188
188
189 def extinct(self):
189 def extinct(self):
190 """True if the changeset is extinct"""
190 """True if the changeset is extinct"""
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192
192
193 def orphan(self):
193 def orphan(self):
194 """True if the changeset is not obsolete, but its ancestor is"""
194 """True if the changeset is not obsolete, but its ancestor is"""
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196
196
197 def phasedivergent(self):
197 def phasedivergent(self):
198 """True if the changeset tries to be a successor of a public changeset
198 """True if the changeset tries to be a successor of a public changeset
199
199
200 Only non-public and non-obsolete changesets may be phase-divergent.
200 Only non-public and non-obsolete changesets may be phase-divergent.
201 """
201 """
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203
203
204 def contentdivergent(self):
204 def contentdivergent(self):
205 """Is a successor of a changeset with multiple possible successor sets
205 """Is a successor of a changeset with multiple possible successor sets
206
206
207 Only non-public and non-obsolete changesets may be content-divergent.
207 Only non-public and non-obsolete changesets may be content-divergent.
208 """
208 """
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210
210
211 def isunstable(self):
211 def isunstable(self):
212 """True if the changeset is either orphan, phase-divergent or
212 """True if the changeset is either orphan, phase-divergent or
213 content-divergent"""
213 content-divergent"""
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215
215
216 def instabilities(self):
216 def instabilities(self):
217 """return the list of instabilities affecting this changeset.
217 """return the list of instabilities affecting this changeset.
218
218
219 Instabilities are returned as strings. possible values are:
219 Instabilities are returned as strings. possible values are:
220 - orphan,
220 - orphan,
221 - phase-divergent,
221 - phase-divergent,
222 - content-divergent.
222 - content-divergent.
223 """
223 """
224 instabilities = []
224 instabilities = []
225 if self.orphan():
225 if self.orphan():
226 instabilities.append('orphan')
226 instabilities.append('orphan')
227 if self.phasedivergent():
227 if self.phasedivergent():
228 instabilities.append('phase-divergent')
228 instabilities.append('phase-divergent')
229 if self.contentdivergent():
229 if self.contentdivergent():
230 instabilities.append('content-divergent')
230 instabilities.append('content-divergent')
231 return instabilities
231 return instabilities
232
232
233 def parents(self):
233 def parents(self):
234 """return contexts for each parent changeset"""
234 """return contexts for each parent changeset"""
235 return self._parents
235 return self._parents
236
236
237 def p1(self):
237 def p1(self):
238 return self._parents[0]
238 return self._parents[0]
239
239
240 def p2(self):
240 def p2(self):
241 parents = self._parents
241 parents = self._parents
242 if len(parents) == 2:
242 if len(parents) == 2:
243 return parents[1]
243 return parents[1]
244 return self._repo[nullrev]
244 return self._repo[nullrev]
245
245
246 def _fileinfo(self, path):
246 def _fileinfo(self, path):
247 if r'_manifest' in self.__dict__:
247 if r'_manifest' in self.__dict__:
248 try:
248 try:
249 return self._manifest[path], self._manifest.flags(path)
249 return self._manifest[path], self._manifest.flags(path)
250 except KeyError:
250 except KeyError:
251 raise error.ManifestLookupError(self._node, path,
251 raise error.ManifestLookupError(self._node, path,
252 _('not found in manifest'))
252 _('not found in manifest'))
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if path in self._manifestdelta:
254 if path in self._manifestdelta:
255 return (self._manifestdelta[path],
255 return (self._manifestdelta[path],
256 self._manifestdelta.flags(path))
256 self._manifestdelta.flags(path))
257 mfl = self._repo.manifestlog
257 mfl = self._repo.manifestlog
258 try:
258 try:
259 node, flag = mfl[self._changeset.manifest].find(path)
259 node, flag = mfl[self._changeset.manifest].find(path)
260 except KeyError:
260 except KeyError:
261 raise error.ManifestLookupError(self._node, path,
261 raise error.ManifestLookupError(self._node, path,
262 _('not found in manifest'))
262 _('not found in manifest'))
263
263
264 return node, flag
264 return node, flag
265
265
266 def filenode(self, path):
266 def filenode(self, path):
267 return self._fileinfo(path)[0]
267 return self._fileinfo(path)[0]
268
268
269 def flags(self, path):
269 def flags(self, path):
270 try:
270 try:
271 return self._fileinfo(path)[1]
271 return self._fileinfo(path)[1]
272 except error.LookupError:
272 except error.LookupError:
273 return ''
273 return ''
274
274
275 def sub(self, path, allowcreate=True):
275 def sub(self, path, allowcreate=True):
276 '''return a subrepo for the stored revision of path, never wdir()'''
276 '''return a subrepo for the stored revision of path, never wdir()'''
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278
278
279 def nullsub(self, path, pctx):
279 def nullsub(self, path, pctx):
280 return subrepo.nullsubrepo(self, path, pctx)
280 return subrepo.nullsubrepo(self, path, pctx)
281
281
282 def workingsub(self, path):
282 def workingsub(self, path):
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 context.
284 context.
285 '''
285 '''
286 return subrepo.subrepo(self, path, allowwdir=True)
286 return subrepo.subrepo(self, path, allowwdir=True)
287
287
288 def match(self, pats=None, include=None, exclude=None, default='glob',
288 def match(self, pats=None, include=None, exclude=None, default='glob',
289 listsubrepos=False, badfn=None):
289 listsubrepos=False, badfn=None):
290 r = self._repo
290 r = self._repo
291 return matchmod.match(r.root, r.getcwd(), pats,
291 return matchmod.match(r.root, r.getcwd(), pats,
292 include, exclude, default,
292 include, exclude, default,
293 auditor=r.nofsauditor, ctx=self,
293 auditor=r.nofsauditor, ctx=self,
294 listsubrepos=listsubrepos, badfn=badfn)
294 listsubrepos=listsubrepos, badfn=badfn)
295
295
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 losedatafn=None, prefix='', relroot='', copy=None,
297 losedatafn=None, prefix='', relroot='', copy=None,
298 hunksfilterfn=None):
298 copysourcematch=None, hunksfilterfn=None):
299 """Returns a diff generator for the given contexts and matcher"""
299 """Returns a diff generator for the given contexts and matcher"""
300 if ctx2 is None:
300 if ctx2 is None:
301 ctx2 = self.p1()
301 ctx2 = self.p1()
302 if ctx2 is not None:
302 if ctx2 is not None:
303 ctx2 = self._repo[ctx2]
303 ctx2 = self._repo[ctx2]
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 opts=opts, losedatafn=losedatafn, prefix=prefix,
305 opts=opts, losedatafn=losedatafn, prefix=prefix,
306 relroot=relroot, copy=copy,
306 relroot=relroot, copy=copy,
307 copysourcematch=copysourcematch,
307 hunksfilterfn=hunksfilterfn)
308 hunksfilterfn=hunksfilterfn)
308
309
309 def dirs(self):
310 def dirs(self):
310 return self._manifest.dirs()
311 return self._manifest.dirs()
311
312
312 def hasdir(self, dir):
313 def hasdir(self, dir):
313 return self._manifest.hasdir(dir)
314 return self._manifest.hasdir(dir)
314
315
315 def status(self, other=None, match=None, listignored=False,
316 def status(self, other=None, match=None, listignored=False,
316 listclean=False, listunknown=False, listsubrepos=False):
317 listclean=False, listunknown=False, listsubrepos=False):
317 """return status of files between two nodes or node and working
318 """return status of files between two nodes or node and working
318 directory.
319 directory.
319
320
320 If other is None, compare this node with working directory.
321 If other is None, compare this node with working directory.
321
322
322 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 """
324 """
324
325
325 ctx1 = self
326 ctx1 = self
326 ctx2 = self._repo[other]
327 ctx2 = self._repo[other]
327
328
328 # This next code block is, admittedly, fragile logic that tests for
329 # This next code block is, admittedly, fragile logic that tests for
329 # reversing the contexts and wouldn't need to exist if it weren't for
330 # reversing the contexts and wouldn't need to exist if it weren't for
330 # the fast (and common) code path of comparing the working directory
331 # the fast (and common) code path of comparing the working directory
331 # with its first parent.
332 # with its first parent.
332 #
333 #
333 # What we're aiming for here is the ability to call:
334 # What we're aiming for here is the ability to call:
334 #
335 #
335 # workingctx.status(parentctx)
336 # workingctx.status(parentctx)
336 #
337 #
337 # If we always built the manifest for each context and compared those,
338 # If we always built the manifest for each context and compared those,
338 # then we'd be done. But the special case of the above call means we
339 # then we'd be done. But the special case of the above call means we
339 # just copy the manifest of the parent.
340 # just copy the manifest of the parent.
340 reversed = False
341 reversed = False
341 if (not isinstance(ctx1, changectx)
342 if (not isinstance(ctx1, changectx)
342 and isinstance(ctx2, changectx)):
343 and isinstance(ctx2, changectx)):
343 reversed = True
344 reversed = True
344 ctx1, ctx2 = ctx2, ctx1
345 ctx1, ctx2 = ctx2, ctx1
345
346
346 match = self._repo.narrowmatch(match)
347 match = self._repo.narrowmatch(match)
347 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
348 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 listunknown)
351 listunknown)
351
352
352 if reversed:
353 if reversed:
353 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # these make no sense to reverse.
355 # these make no sense to reverse.
355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r.clean)
357 r.clean)
357
358
358 if listsubrepos:
359 if listsubrepos:
359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 try:
361 try:
361 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
362 except KeyError:
363 except KeyError:
363 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
364 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # won't contain that subpath. The best we can do ignore it.
366 # won't contain that subpath. The best we can do ignore it.
366 rev2 = None
367 rev2 = None
367 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
368 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
370 listsubrepos=True)
371 listsubrepos=True)
371 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373
374
374 for l in r:
375 for l in r:
375 l.sort()
376 l.sort()
376
377
377 return r
378 return r
378
379
379 class changectx(basectx):
380 class changectx(basectx):
380 """A changecontext object makes access to data related to a particular
381 """A changecontext object makes access to data related to a particular
381 changeset convenient. It represents a read-only context already present in
382 changeset convenient. It represents a read-only context already present in
382 the repo."""
383 the repo."""
383 def __init__(self, repo, rev, node):
384 def __init__(self, repo, rev, node):
384 super(changectx, self).__init__(repo)
385 super(changectx, self).__init__(repo)
385 self._rev = rev
386 self._rev = rev
386 self._node = node
387 self._node = node
387
388
388 def __hash__(self):
389 def __hash__(self):
389 try:
390 try:
390 return hash(self._rev)
391 return hash(self._rev)
391 except AttributeError:
392 except AttributeError:
392 return id(self)
393 return id(self)
393
394
394 def __nonzero__(self):
395 def __nonzero__(self):
395 return self._rev != nullrev
396 return self._rev != nullrev
396
397
397 __bool__ = __nonzero__
398 __bool__ = __nonzero__
398
399
399 @propertycache
400 @propertycache
400 def _changeset(self):
401 def _changeset(self):
401 return self._repo.changelog.changelogrevision(self.rev())
402 return self._repo.changelog.changelogrevision(self.rev())
402
403
403 @propertycache
404 @propertycache
404 def _manifest(self):
405 def _manifest(self):
405 return self._manifestctx.read()
406 return self._manifestctx.read()
406
407
407 @property
408 @property
408 def _manifestctx(self):
409 def _manifestctx(self):
409 return self._repo.manifestlog[self._changeset.manifest]
410 return self._repo.manifestlog[self._changeset.manifest]
410
411
411 @propertycache
412 @propertycache
412 def _manifestdelta(self):
413 def _manifestdelta(self):
413 return self._manifestctx.readdelta()
414 return self._manifestctx.readdelta()
414
415
415 @propertycache
416 @propertycache
416 def _parents(self):
417 def _parents(self):
417 repo = self._repo
418 repo = self._repo
418 p1, p2 = repo.changelog.parentrevs(self._rev)
419 p1, p2 = repo.changelog.parentrevs(self._rev)
419 if p2 == nullrev:
420 if p2 == nullrev:
420 return [repo[p1]]
421 return [repo[p1]]
421 return [repo[p1], repo[p2]]
422 return [repo[p1], repo[p2]]
422
423
423 def changeset(self):
424 def changeset(self):
424 c = self._changeset
425 c = self._changeset
425 return (
426 return (
426 c.manifest,
427 c.manifest,
427 c.user,
428 c.user,
428 c.date,
429 c.date,
429 c.files,
430 c.files,
430 c.description,
431 c.description,
431 c.extra,
432 c.extra,
432 )
433 )
433 def manifestnode(self):
434 def manifestnode(self):
434 return self._changeset.manifest
435 return self._changeset.manifest
435
436
436 def user(self):
437 def user(self):
437 return self._changeset.user
438 return self._changeset.user
438 def date(self):
439 def date(self):
439 return self._changeset.date
440 return self._changeset.date
440 def files(self):
441 def files(self):
441 return self._changeset.files
442 return self._changeset.files
442 def description(self):
443 def description(self):
443 return self._changeset.description
444 return self._changeset.description
444 def branch(self):
445 def branch(self):
445 return encoding.tolocal(self._changeset.extra.get("branch"))
446 return encoding.tolocal(self._changeset.extra.get("branch"))
446 def closesbranch(self):
447 def closesbranch(self):
447 return 'close' in self._changeset.extra
448 return 'close' in self._changeset.extra
448 def extra(self):
449 def extra(self):
449 """Return a dict of extra information."""
450 """Return a dict of extra information."""
450 return self._changeset.extra
451 return self._changeset.extra
451 def tags(self):
452 def tags(self):
452 """Return a list of byte tag names"""
453 """Return a list of byte tag names"""
453 return self._repo.nodetags(self._node)
454 return self._repo.nodetags(self._node)
454 def bookmarks(self):
455 def bookmarks(self):
455 """Return a list of byte bookmark names."""
456 """Return a list of byte bookmark names."""
456 return self._repo.nodebookmarks(self._node)
457 return self._repo.nodebookmarks(self._node)
457 def phase(self):
458 def phase(self):
458 return self._repo._phasecache.phase(self._repo, self._rev)
459 return self._repo._phasecache.phase(self._repo, self._rev)
459 def hidden(self):
460 def hidden(self):
460 return self._rev in repoview.filterrevs(self._repo, 'visible')
461 return self._rev in repoview.filterrevs(self._repo, 'visible')
461
462
462 def isinmemory(self):
463 def isinmemory(self):
463 return False
464 return False
464
465
465 def children(self):
466 def children(self):
466 """return list of changectx contexts for each child changeset.
467 """return list of changectx contexts for each child changeset.
467
468
468 This returns only the immediate child changesets. Use descendants() to
469 This returns only the immediate child changesets. Use descendants() to
469 recursively walk children.
470 recursively walk children.
470 """
471 """
471 c = self._repo.changelog.children(self._node)
472 c = self._repo.changelog.children(self._node)
472 return [self._repo[x] for x in c]
473 return [self._repo[x] for x in c]
473
474
474 def ancestors(self):
475 def ancestors(self):
475 for a in self._repo.changelog.ancestors([self._rev]):
476 for a in self._repo.changelog.ancestors([self._rev]):
476 yield self._repo[a]
477 yield self._repo[a]
477
478
478 def descendants(self):
479 def descendants(self):
479 """Recursively yield all children of the changeset.
480 """Recursively yield all children of the changeset.
480
481
481 For just the immediate children, use children()
482 For just the immediate children, use children()
482 """
483 """
483 for d in self._repo.changelog.descendants([self._rev]):
484 for d in self._repo.changelog.descendants([self._rev]):
484 yield self._repo[d]
485 yield self._repo[d]
485
486
486 def filectx(self, path, fileid=None, filelog=None):
487 def filectx(self, path, fileid=None, filelog=None):
487 """get a file context from this changeset"""
488 """get a file context from this changeset"""
488 if fileid is None:
489 if fileid is None:
489 fileid = self.filenode(path)
490 fileid = self.filenode(path)
490 return filectx(self._repo, path, fileid=fileid,
491 return filectx(self._repo, path, fileid=fileid,
491 changectx=self, filelog=filelog)
492 changectx=self, filelog=filelog)
492
493
493 def ancestor(self, c2, warn=False):
494 def ancestor(self, c2, warn=False):
494 """return the "best" ancestor context of self and c2
495 """return the "best" ancestor context of self and c2
495
496
496 If there are multiple candidates, it will show a message and check
497 If there are multiple candidates, it will show a message and check
497 merge.preferancestor configuration before falling back to the
498 merge.preferancestor configuration before falling back to the
498 revlog ancestor."""
499 revlog ancestor."""
499 # deal with workingctxs
500 # deal with workingctxs
500 n2 = c2._node
501 n2 = c2._node
501 if n2 is None:
502 if n2 is None:
502 n2 = c2._parents[0]._node
503 n2 = c2._parents[0]._node
503 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
504 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
504 if not cahs:
505 if not cahs:
505 anc = nullid
506 anc = nullid
506 elif len(cahs) == 1:
507 elif len(cahs) == 1:
507 anc = cahs[0]
508 anc = cahs[0]
508 else:
509 else:
509 # experimental config: merge.preferancestor
510 # experimental config: merge.preferancestor
510 for r in self._repo.ui.configlist('merge', 'preferancestor'):
511 for r in self._repo.ui.configlist('merge', 'preferancestor'):
511 try:
512 try:
512 ctx = scmutil.revsymbol(self._repo, r)
513 ctx = scmutil.revsymbol(self._repo, r)
513 except error.RepoLookupError:
514 except error.RepoLookupError:
514 continue
515 continue
515 anc = ctx.node()
516 anc = ctx.node()
516 if anc in cahs:
517 if anc in cahs:
517 break
518 break
518 else:
519 else:
519 anc = self._repo.changelog.ancestor(self._node, n2)
520 anc = self._repo.changelog.ancestor(self._node, n2)
520 if warn:
521 if warn:
521 self._repo.ui.status(
522 self._repo.ui.status(
522 (_("note: using %s as ancestor of %s and %s\n") %
523 (_("note: using %s as ancestor of %s and %s\n") %
523 (short(anc), short(self._node), short(n2))) +
524 (short(anc), short(self._node), short(n2))) +
524 ''.join(_(" alternatively, use --config "
525 ''.join(_(" alternatively, use --config "
525 "merge.preferancestor=%s\n") %
526 "merge.preferancestor=%s\n") %
526 short(n) for n in sorted(cahs) if n != anc))
527 short(n) for n in sorted(cahs) if n != anc))
527 return self._repo[anc]
528 return self._repo[anc]
528
529
529 def isancestorof(self, other):
530 def isancestorof(self, other):
530 """True if this changeset is an ancestor of other"""
531 """True if this changeset is an ancestor of other"""
531 return self._repo.changelog.isancestorrev(self._rev, other._rev)
532 return self._repo.changelog.isancestorrev(self._rev, other._rev)
532
533
533 def walk(self, match):
534 def walk(self, match):
534 '''Generates matching file names.'''
535 '''Generates matching file names.'''
535
536
536 # Wrap match.bad method to have message with nodeid
537 # Wrap match.bad method to have message with nodeid
537 def bad(fn, msg):
538 def bad(fn, msg):
538 # The manifest doesn't know about subrepos, so don't complain about
539 # The manifest doesn't know about subrepos, so don't complain about
539 # paths into valid subrepos.
540 # paths into valid subrepos.
540 if any(fn == s or fn.startswith(s + '/')
541 if any(fn == s or fn.startswith(s + '/')
541 for s in self.substate):
542 for s in self.substate):
542 return
543 return
543 match.bad(fn, _('no such file in rev %s') % self)
544 match.bad(fn, _('no such file in rev %s') % self)
544
545
545 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
546 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
546 return self._manifest.walk(m)
547 return self._manifest.walk(m)
547
548
548 def matches(self, match):
549 def matches(self, match):
549 return self.walk(match)
550 return self.walk(match)
550
551
class basefilectx(object):
    """A filecontext object represents the common logic for its children:

    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory.
    """

    @propertycache
    def _filelog(self):
        # Filelog for this file's path, opened lazily on first use.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Changelog revision this file context is associated with.
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, so we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # Truthy iff the file revision actually exists.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # No filenode available (e.g. working copy): fall back to
            # identity so the object stays hashable.
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # -- simple accessors, mostly delegating to the changectx or filelog --

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts.
        Subclasses representing absent files override this to return True.
        """
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() == fctx.size():
            # size() matches: need to compare content
            return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs, so content must differ
        return True

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a
            # result.  But if the manifest uses a buggy file revision (not a
            # child of the one it replaces) we could.  Such a buggy situation
            # will likely result in a crash somewhere else at some point.
        return lkr

    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision
        """
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        # Yield ancestor file contexts, newest (by linkrev) first.
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
950
951
951 class filectx(basefilectx):
952 class filectx(basefilectx):
952 """A filecontext object makes access to data related to a particular
953 """A filecontext object makes access to data related to a particular
953 filerevision convenient."""
954 filerevision convenient."""
954 def __init__(self, repo, path, changeid=None, fileid=None,
955 def __init__(self, repo, path, changeid=None, fileid=None,
955 filelog=None, changectx=None):
956 filelog=None, changectx=None):
956 """changeid must be a revision number, if specified.
957 """changeid must be a revision number, if specified.
957 fileid can be a file revision or node."""
958 fileid can be a file revision or node."""
958 self._repo = repo
959 self._repo = repo
959 self._path = path
960 self._path = path
960
961
961 assert (changeid is not None
962 assert (changeid is not None
962 or fileid is not None
963 or fileid is not None
963 or changectx is not None), \
964 or changectx is not None), \
964 ("bad args: changeid=%r, fileid=%r, changectx=%r"
965 ("bad args: changeid=%r, fileid=%r, changectx=%r"
965 % (changeid, fileid, changectx))
966 % (changeid, fileid, changectx))
966
967
967 if filelog is not None:
968 if filelog is not None:
968 self._filelog = filelog
969 self._filelog = filelog
969
970
970 if changeid is not None:
971 if changeid is not None:
971 self._changeid = changeid
972 self._changeid = changeid
972 if changectx is not None:
973 if changectx is not None:
973 self._changectx = changectx
974 self._changectx = changectx
974 if fileid is not None:
975 if fileid is not None:
975 self._fileid = fileid
976 self._fileid = fileid
976
977
977 @propertycache
978 @propertycache
978 def _changectx(self):
979 def _changectx(self):
979 try:
980 try:
980 return self._repo[self._changeid]
981 return self._repo[self._changeid]
981 except error.FilteredRepoLookupError:
982 except error.FilteredRepoLookupError:
982 # Linkrev may point to any revision in the repository. When the
983 # Linkrev may point to any revision in the repository. When the
983 # repository is filtered this may lead to `filectx` trying to build
984 # repository is filtered this may lead to `filectx` trying to build
984 # `changectx` for filtered revision. In such case we fallback to
985 # `changectx` for filtered revision. In such case we fallback to
985 # creating `changectx` on the unfiltered version of the reposition.
986 # creating `changectx` on the unfiltered version of the reposition.
986 # This fallback should not be an issue because `changectx` from
987 # This fallback should not be an issue because `changectx` from
987 # `filectx` are not used in complex operations that care about
988 # `filectx` are not used in complex operations that care about
988 # filtering.
989 # filtering.
989 #
990 #
990 # This fallback is a cheap and dirty fix that prevent several
991 # This fallback is a cheap and dirty fix that prevent several
991 # crashes. It does not ensure the behavior is correct. However the
992 # crashes. It does not ensure the behavior is correct. However the
992 # behavior was not correct before filtering either and "incorrect
993 # behavior was not correct before filtering either and "incorrect
993 # behavior" is seen as better as "crash"
994 # behavior" is seen as better as "crash"
994 #
995 #
995 # Linkrevs have several serious troubles with filtering that are
996 # Linkrevs have several serious troubles with filtering that are
996 # complicated to solve. Proper handling of the issue here should be
997 # complicated to solve. Proper handling of the issue here should be
997 # considered when solving linkrev issue are on the table.
998 # considered when solving linkrev issue are on the table.
998 return self._repo.unfiltered()[self._changeid]
999 return self._repo.unfiltered()[self._changeid]
999
1000
1000 def filectx(self, fileid, changeid=None):
1001 def filectx(self, fileid, changeid=None):
1001 '''opens an arbitrary revision of the file without
1002 '''opens an arbitrary revision of the file without
1002 opening a new filelog'''
1003 opening a new filelog'''
1003 return filectx(self._repo, self._path, fileid=fileid,
1004 return filectx(self._repo, self._path, fileid=fileid,
1004 filelog=self._filelog, changeid=changeid)
1005 filelog=self._filelog, changeid=changeid)
1005
1006
1006 def rawdata(self):
1007 def rawdata(self):
1007 return self._filelog.revision(self._filenode, raw=True)
1008 return self._filelog.revision(self._filenode, raw=True)
1008
1009
1009 def rawflags(self):
1010 def rawflags(self):
1010 """low-level revlog flags"""
1011 """low-level revlog flags"""
1011 return self._filelog.flags(self._filerev)
1012 return self._filelog.flags(self._filerev)
1012
1013
1013 def data(self):
1014 def data(self):
1014 try:
1015 try:
1015 return self._filelog.read(self._filenode)
1016 return self._filelog.read(self._filenode)
1016 except error.CensoredNodeError:
1017 except error.CensoredNodeError:
1017 if self._repo.ui.config("censor", "policy") == "ignore":
1018 if self._repo.ui.config("censor", "policy") == "ignore":
1018 return ""
1019 return ""
1019 raise error.Abort(_("censored node: %s") % short(self._filenode),
1020 raise error.Abort(_("censored node: %s") % short(self._filenode),
1020 hint=_("set censor.policy to ignore errors"))
1021 hint=_("set censor.policy to ignore errors"))
1021
1022
    def size(self):
        """Return the size of this file revision as reported by the
        filelog."""
        return self._filelog.size(self._filerev)
1024
1025
1025 @propertycache
1026 @propertycache
1026 def _copied(self):
1027 def _copied(self):
1027 """check if file was actually renamed in this changeset revision
1028 """check if file was actually renamed in this changeset revision
1028
1029
1029 If rename logged in file revision, we report copy for changeset only
1030 If rename logged in file revision, we report copy for changeset only
1030 if file revisions linkrev points back to the changeset in question
1031 if file revisions linkrev points back to the changeset in question
1031 or both changeset parents contain different file revisions.
1032 or both changeset parents contain different file revisions.
1032 """
1033 """
1033
1034
1034 renamed = self._filelog.renamed(self._filenode)
1035 renamed = self._filelog.renamed(self._filenode)
1035 if not renamed:
1036 if not renamed:
1036 return None
1037 return None
1037
1038
1038 if self.rev() == self.linkrev():
1039 if self.rev() == self.linkrev():
1039 return renamed
1040 return renamed
1040
1041
1041 name = self.path()
1042 name = self.path()
1042 fnode = self._filenode
1043 fnode = self._filenode
1043 for p in self._changectx.parents():
1044 for p in self._changectx.parents():
1044 try:
1045 try:
1045 if fnode == p.filenode(name):
1046 if fnode == p.filenode(name):
1046 return None
1047 return None
1047 except error.LookupError:
1048 except error.LookupError:
1048 pass
1049 pass
1049 return renamed
1050 return renamed
1050
1051
1051 def children(self):
1052 def children(self):
1052 # hard for renames
1053 # hard for renames
1053 c = self._filelog.children(self._filenode)
1054 c = self._filelog.children(self._filenode)
1054 return [filectx(self._repo, self._path, fileid=x,
1055 return [filectx(self._repo, self._path, fileid=x,
1055 filelog=self._filelog) for x in c]
1056 filelog=self._filelog) for x in c]
1056
1057
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # rev/node are None until the context is actually committed
        self._rev = None
        self._node = None
        self._text = text
        # only override the lazily-computed propertycaches when the caller
        # supplied explicit values
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        # default the branch from the dirstate when not given in extra
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "abcdef123456+": first parent plus a '+' marker
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # dirstate may supply real filesystem flags; _buildflagfunc is the
        # fallback when the filesystem cannot
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # computed lazily unless `changes` was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin a deterministic date
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no subrepo revision yet
        return None

    def manifestnode(self):
        # no manifest node exists until commit
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all files touched relative to the parent, sorted
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    # simple accessors over the (possibly lazily-computed) status
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never lower than any parent's phase
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest when one has been built already
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # tracked files matching `match`, excluding removed ('r') entries
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        # yield the parents themselves first, then their ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1259
1260
1260 class workingctx(committablectx):
1261 class workingctx(committablectx):
1261 """A workingctx object makes access to data related to
1262 """A workingctx object makes access to data related to
1262 the current working directory convenient.
1263 the current working directory convenient.
1263 date - any valid date string or (unixtime, offset), or None.
1264 date - any valid date string or (unixtime, offset), or None.
1264 user - username string, or None.
1265 user - username string, or None.
1265 extra - a dictionary of extra values, or None.
1266 extra - a dictionary of extra values, or None.
1266 changes - a list of file lists as returned by localrepo.status()
1267 changes - a list of file lists as returned by localrepo.status()
1267 or None to use the repository status.
1268 or None to use the repository status.
1268 """
1269 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # pure delegation: committablectx handles all argument processing
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1272
1273
1273 def __iter__(self):
1274 def __iter__(self):
1274 d = self._repo.dirstate
1275 d = self._repo.dirstate
1275 for f in d:
1276 for f in d:
1276 if d[f] != 'r':
1277 if d[f] != 'r':
1277 yield f
1278 yield f
1278
1279
1279 def __contains__(self, key):
1280 def __contains__(self, key):
1280 return self._repo.dirstate[key] not in "?r"
1281 return self._repo.dirstate[key] not in "?r"
1281
1282
    def hex(self):
        # the working directory is always identified by the fixed wdirid
        return hex(wdirid)
1284
1285
1285 @propertycache
1286 @propertycache
1286 def _parents(self):
1287 def _parents(self):
1287 p = self._repo.dirstate.parents()
1288 p = self._repo.dirstate.parents()
1288 if p[1] == nullid:
1289 if p[1] == nullid:
1289 p = p[:-1]
1290 p = p[:-1]
1290 # use unfiltered repo to delay/avoid loading obsmarkers
1291 # use unfiltered repo to delay/avoid loading obsmarkers
1291 unfi = self._repo.unfiltered()
1292 unfi = self._repo.unfiltered()
1292 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1293 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1293
1294
    def _fileinfo(self, path):
        """Look up *path*, forcing the status-derived manifest first."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1298
1299
1299 def filectx(self, path, filelog=None):
1300 def filectx(self, path, filelog=None):
1300 """get a file context from the working directory"""
1301 """get a file context from the working directory"""
1301 return workingfilectx(self._repo, path, workingctx=self,
1302 return workingfilectx(self._repo, path, workingctx=self,
1302 filelog=filelog)
1303 filelog=filelog)
1303
1304
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified

        missing: also count files deleted from disk
        merge: count an in-progress merge (second parent present)
        branch: count a branch different from the first parent's

        Note: may return a truthy non-boolean (e.g. a context or a file
        list), so callers should treat the result as a boolean only.
        """
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1315
1316
1316 def add(self, list, prefix=""):
1317 def add(self, list, prefix=""):
1317 with self._repo.wlock():
1318 with self._repo.wlock():
1318 ui, ds = self._repo.ui, self._repo.dirstate
1319 ui, ds = self._repo.ui, self._repo.dirstate
1319 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1320 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1320 rejected = []
1321 rejected = []
1321 lstat = self._repo.wvfs.lstat
1322 lstat = self._repo.wvfs.lstat
1322 for f in list:
1323 for f in list:
1323 # ds.pathto() returns an absolute file when this is invoked from
1324 # ds.pathto() returns an absolute file when this is invoked from
1324 # the keyword extension. That gets flagged as non-portable on
1325 # the keyword extension. That gets flagged as non-portable on
1325 # Windows, since it contains the drive letter and colon.
1326 # Windows, since it contains the drive letter and colon.
1326 scmutil.checkportable(ui, os.path.join(prefix, f))
1327 scmutil.checkportable(ui, os.path.join(prefix, f))
1327 try:
1328 try:
1328 st = lstat(f)
1329 st = lstat(f)
1329 except OSError:
1330 except OSError:
1330 ui.warn(_("%s does not exist!\n") % uipath(f))
1331 ui.warn(_("%s does not exist!\n") % uipath(f))
1331 rejected.append(f)
1332 rejected.append(f)
1332 continue
1333 continue
1333 limit = ui.configbytes('ui', 'large-file-limit')
1334 limit = ui.configbytes('ui', 'large-file-limit')
1334 if limit != 0 and st.st_size > limit:
1335 if limit != 0 and st.st_size > limit:
1335 ui.warn(_("%s: up to %d MB of RAM may be required "
1336 ui.warn(_("%s: up to %d MB of RAM may be required "
1336 "to manage this file\n"
1337 "to manage this file\n"
1337 "(use 'hg revert %s' to cancel the "
1338 "(use 'hg revert %s' to cancel the "
1338 "pending addition)\n")
1339 "pending addition)\n")
1339 % (f, 3 * st.st_size // 1000000, uipath(f)))
1340 % (f, 3 * st.st_size // 1000000, uipath(f)))
1340 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1341 ui.warn(_("%s not added: only files and symlinks "
1342 ui.warn(_("%s not added: only files and symlinks "
1342 "supported currently\n") % uipath(f))
1343 "supported currently\n") % uipath(f))
1343 rejected.append(f)
1344 rejected.append(f)
1344 elif ds[f] in 'amn':
1345 elif ds[f] in 'amn':
1345 ui.warn(_("%s already tracked!\n") % uipath(f))
1346 ui.warn(_("%s already tracked!\n") % uipath(f))
1346 elif ds[f] == 'r':
1347 elif ds[f] == 'r':
1347 ds.normallookup(f)
1348 ds.normallookup(f)
1348 else:
1349 else:
1349 ds.add(f)
1350 ds.add(f)
1350 return rejected
1351 return rejected
1351
1352
1352 def forget(self, files, prefix=""):
1353 def forget(self, files, prefix=""):
1353 with self._repo.wlock():
1354 with self._repo.wlock():
1354 ds = self._repo.dirstate
1355 ds = self._repo.dirstate
1355 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1356 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1356 rejected = []
1357 rejected = []
1357 for f in files:
1358 for f in files:
1358 if f not in ds:
1359 if f not in ds:
1359 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1360 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1360 rejected.append(f)
1361 rejected.append(f)
1361 elif ds[f] != 'a':
1362 elif ds[f] != 'a':
1362 ds.remove(f)
1363 ds.remove(f)
1363 else:
1364 else:
1364 ds.drop(f)
1365 ds.drop(f)
1365 return rejected
1366 return rejected
1366
1367
    def copy(self, source, dest):
        """Record in the dirstate that *dest* was copied from *source*.

        Warns and records nothing when *dest* is missing from the working
        directory or is not a regular file/symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    # destination not yet tracked: schedule an add
                    ds.add(dest)
                elif ds[dest] in 'r':
                    # destination was marked removed: resurrect it
                    ds.normallookup(dest)
                ds.copy(source, dest)
1388
1389
1389 def match(self, pats=None, include=None, exclude=None, default='glob',
1390 def match(self, pats=None, include=None, exclude=None, default='glob',
1390 listsubrepos=False, badfn=None):
1391 listsubrepos=False, badfn=None):
1391 r = self._repo
1392 r = self._repo
1392
1393
1393 # Only a case insensitive filesystem needs magic to translate user input
1394 # Only a case insensitive filesystem needs magic to translate user input
1394 # to actual case in the filesystem.
1395 # to actual case in the filesystem.
1395 icasefs = not util.fscasesensitive(r.root)
1396 icasefs = not util.fscasesensitive(r.root)
1396 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1397 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1397 default, auditor=r.auditor, ctx=self,
1398 default, auditor=r.auditor, ctx=self,
1398 listsubrepos=listsubrepos, badfn=badfn,
1399 listsubrepos=listsubrepos, badfn=badfn,
1399 icasefs=icasefs)
1400 icasefs=icasefs)
1400
1401
1401 def _filtersuspectsymlink(self, files):
1402 def _filtersuspectsymlink(self, files):
1402 if not files or self._repo.dirstate._checklink:
1403 if not files or self._repo.dirstate._checklink:
1403 return files
1404 return files
1404
1405
1405 # Symlink placeholders may get non-symlink-like contents
1406 # Symlink placeholders may get non-symlink-like contents
1406 # via user error or dereferencing by NFS or Samba servers,
1407 # via user error or dereferencing by NFS or Samba servers,
1407 # so we filter out any placeholders that don't look like a
1408 # so we filter out any placeholders that don't look like a
1408 # symlink
1409 # symlink
1409 sane = []
1410 sane = []
1410 for f in files:
1411 for f in files:
1411 if self.flags(f) == 'l':
1412 if self.flags(f) == 'l':
1412 d = self[f].data()
1413 d = self[f].data()
1413 if (d == '' or len(d) >= 1024 or '\n' in d
1414 if (d == '' or len(d) >= 1024 or '\n' in d
1414 or stringutil.binary(d)):
1415 or stringutil.binary(d)):
1415 self._repo.ui.debug('ignoring suspect symlink placeholder'
1416 self._repo.ui.debug('ignoring suspect symlink placeholder'
1416 ' "%s"\n' % f)
1417 ' "%s"\n' % f)
1417 continue
1418 continue
1418 sane.append(f)
1419 sane.append(f)
1419 return sane
1420 return sane
1420
1421
    def _checklookup(self, files):
        """Recheck files whose dirstate entry was inconclusive.

        Returns (modified, deleted, fixup): content really changed, file
        became inaccessible meanwhile, or file is actually clean and only
        the dirstate entry needs refreshing.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1450
1451
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Best-effort: the wlock is taken non-blocking, and the dirstate is
        only rewritten if its on-disk identity is unchanged since the
        status run (otherwise writing would clobber concurrent changes,
        see issue5584).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1490
1491
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # `cmp` holds files the dirstate could not classify without a
        # content comparison
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        # opportunistically refresh the dirstate for files proven clean
        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1521
1522
1522 @propertycache
1523 @propertycache
1523 def _manifest(self):
1524 def _manifest(self):
1524 """generate a manifest corresponding to the values in self._status
1525 """generate a manifest corresponding to the values in self._status
1525
1526
1526 This reuse the file nodeid from parent, but we use special node
1527 This reuse the file nodeid from parent, but we use special node
1527 identifiers for added and modified files. This is used by manifests
1528 identifiers for added and modified files. This is used by manifests
1528 merge to see that files are different and by update logic to avoid
1529 merge to see that files are different and by update logic to avoid
1529 deleting newly added files.
1530 deleting newly added files.
1530 """
1531 """
1531 return self._buildstatusmanifest(self._status)
1532 return self._buildstatusmanifest(self._status)
1532
1533
1533 def _buildstatusmanifest(self, status):
1534 def _buildstatusmanifest(self, status):
1534 """Builds a manifest that includes the given status results."""
1535 """Builds a manifest that includes the given status results."""
1535 parents = self.parents()
1536 parents = self.parents()
1536
1537
1537 man = parents[0].manifest().copy()
1538 man = parents[0].manifest().copy()
1538
1539
1539 ff = self._flagfunc
1540 ff = self._flagfunc
1540 for i, l in ((addednodeid, status.added),
1541 for i, l in ((addednodeid, status.added),
1541 (modifiednodeid, status.modified)):
1542 (modifiednodeid, status.modified)):
1542 for f in l:
1543 for f in l:
1543 man[f] = i
1544 man[f] = i
1544 try:
1545 try:
1545 man.setflag(f, ff(f))
1546 man.setflag(f, ff(f))
1546 except OSError:
1547 except OSError:
1547 pass
1548 pass
1548
1549
1549 for f in status.deleted + status.removed:
1550 for f in status.deleted + status.removed:
1550 if f in man:
1551 if f in man:
1551 del man[f]
1552 del man[f]
1552
1553
1553 return man
1554 return man
1554
1555
1555 def _buildstatus(self, other, s, match, listignored, listclean,
1556 def _buildstatus(self, other, s, match, listignored, listclean,
1556 listunknown):
1557 listunknown):
1557 """build a status with respect to another context
1558 """build a status with respect to another context
1558
1559
1559 This includes logic for maintaining the fast path of status when
1560 This includes logic for maintaining the fast path of status when
1560 comparing the working directory against its parent, which is to skip
1561 comparing the working directory against its parent, which is to skip
1561 building a new manifest if self (working directory) is not comparing
1562 building a new manifest if self (working directory) is not comparing
1562 against its parent (repo['.']).
1563 against its parent (repo['.']).
1563 """
1564 """
1564 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1565 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1565 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1566 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1566 # might have accidentally ended up with the entire contents of the file
1567 # might have accidentally ended up with the entire contents of the file
1567 # they are supposed to be linking to.
1568 # they are supposed to be linking to.
1568 s.modified[:] = self._filtersuspectsymlink(s.modified)
1569 s.modified[:] = self._filtersuspectsymlink(s.modified)
1569 if other != self._repo['.']:
1570 if other != self._repo['.']:
1570 s = super(workingctx, self)._buildstatus(other, s, match,
1571 s = super(workingctx, self)._buildstatus(other, s, match,
1571 listignored, listclean,
1572 listignored, listclean,
1572 listunknown)
1573 listunknown)
1573 return s
1574 return s
1574
1575
1575 def _matchstatus(self, other, match):
1576 def _matchstatus(self, other, match):
1576 """override the match method with a filter for directory patterns
1577 """override the match method with a filter for directory patterns
1577
1578
1578 We use inheritance to customize the match.bad method only in cases of
1579 We use inheritance to customize the match.bad method only in cases of
1579 workingctx since it belongs only to the working directory when
1580 workingctx since it belongs only to the working directory when
1580 comparing against the parent changeset.
1581 comparing against the parent changeset.
1581
1582
1582 If we aren't comparing against the working directory's parent, then we
1583 If we aren't comparing against the working directory's parent, then we
1583 just use the default match object sent to us.
1584 just use the default match object sent to us.
1584 """
1585 """
1585 if other != self._repo['.']:
1586 if other != self._repo['.']:
1586 def bad(f, msg):
1587 def bad(f, msg):
1587 # 'f' may be a directory pattern from 'match.files()',
1588 # 'f' may be a directory pattern from 'match.files()',
1588 # so 'f not in ctx1' is not enough
1589 # so 'f not in ctx1' is not enough
1589 if f not in other and not other.hasdir(f):
1590 if f not in other and not other.hasdir(f):
1590 self._repo.ui.warn('%s: %s\n' %
1591 self._repo.ui.warn('%s: %s\n' %
1591 (self._repo.dirstate.pathto(f), msg))
1592 (self._repo.dirstate.pathto(f), msg))
1592 match.bad = bad
1593 match.bad = bad
1593 return match
1594 return match
1594
1595
1595 def markcommitted(self, node):
1596 def markcommitted(self, node):
1596 super(workingctx, self).markcommitted(node)
1597 super(workingctx, self).markcommitted(node)
1597
1598
1598 sparse.aftercommit(self._repo, node)
1599 sparse.aftercommit(self._repo, node)
1599
1600
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # first parent is the copy source (with filelog unknown)
            candidates = [renamed + (None,)]
        else:
            candidates = [(path, filenode(parentctxs[0], path), filelog)]

        for pctx in parentctxs[1:]:
            candidates.append((path, filenode(pctx, path), filelog))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        # uncommitted files have no children
        return []
1646
1647
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source, sourcenode) if this file was copied, else None."""
        src = self._repo.dirstate.copied(self._path)
        if not src:
            return None
        return src, self._changectx._parents[0]._manifest.get(src, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone; fall back to the changectx date
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for tracked files (normal/merged/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1734
1735
1735 class overlayworkingctx(committablectx):
1736 class overlayworkingctx(committablectx):
1736 """Wraps another mutable context with a write-back cache that can be
1737 """Wraps another mutable context with a write-back cache that can be
1737 converted into a commit context.
1738 converted into a commit context.
1738
1739
1739 self._cache[path] maps to a dict with keys: {
1740 self._cache[path] maps to a dict with keys: {
1740 'exists': bool?
1741 'exists': bool?
1741 'date': date?
1742 'date': date?
1742 'data': str?
1743 'data': str?
1743 'flags': str?
1744 'flags': str?
1744 'copied': str? (path or None)
1745 'copied': str? (path or None)
1745 }
1746 }
1746 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1747 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1747 is `False`, the file was deleted.
1748 is `False`, the file was deleted.
1748 """
1749 """
1749
1750
1750 def __init__(self, repo):
1751 def __init__(self, repo):
1751 super(overlayworkingctx, self).__init__(repo)
1752 super(overlayworkingctx, self).__init__(repo)
1752 self.clean()
1753 self.clean()
1753
1754
1754 def setbase(self, wrappedctx):
1755 def setbase(self, wrappedctx):
1755 self._wrappedctx = wrappedctx
1756 self._wrappedctx = wrappedctx
1756 self._parents = [wrappedctx]
1757 self._parents = [wrappedctx]
1757 # Drop old manifest cache as it is now out of date.
1758 # Drop old manifest cache as it is now out of date.
1758 # This is necessary when, e.g., rebasing several nodes with one
1759 # This is necessary when, e.g., rebasing several nodes with one
1759 # ``overlayworkingctx`` (e.g. with --collapse).
1760 # ``overlayworkingctx`` (e.g. with --collapse).
1760 util.clearcachedproperty(self, '_manifest')
1761 util.clearcachedproperty(self, '_manifest')
1761
1762
1762 def data(self, path):
1763 def data(self, path):
1763 if self.isdirty(path):
1764 if self.isdirty(path):
1764 if self._cache[path]['exists']:
1765 if self._cache[path]['exists']:
1765 if self._cache[path]['data']:
1766 if self._cache[path]['data']:
1766 return self._cache[path]['data']
1767 return self._cache[path]['data']
1767 else:
1768 else:
1768 # Must fallback here, too, because we only set flags.
1769 # Must fallback here, too, because we only set flags.
1769 return self._wrappedctx[path].data()
1770 return self._wrappedctx[path].data()
1770 else:
1771 else:
1771 raise error.ProgrammingError("No such file or directory: %s" %
1772 raise error.ProgrammingError("No such file or directory: %s" %
1772 path)
1773 path)
1773 else:
1774 else:
1774 return self._wrappedctx[path].data()
1775 return self._wrappedctx[path].data()
1775
1776
1776 @propertycache
1777 @propertycache
1777 def _manifest(self):
1778 def _manifest(self):
1778 parents = self.parents()
1779 parents = self.parents()
1779 man = parents[0].manifest().copy()
1780 man = parents[0].manifest().copy()
1780
1781
1781 flag = self._flagfunc
1782 flag = self._flagfunc
1782 for path in self.added():
1783 for path in self.added():
1783 man[path] = addednodeid
1784 man[path] = addednodeid
1784 man.setflag(path, flag(path))
1785 man.setflag(path, flag(path))
1785 for path in self.modified():
1786 for path in self.modified():
1786 man[path] = modifiednodeid
1787 man[path] = modifiednodeid
1787 man.setflag(path, flag(path))
1788 man.setflag(path, flag(path))
1788 for path in self.removed():
1789 for path in self.removed():
1789 del man[path]
1790 del man[path]
1790 return man
1791 return man
1791
1792
1792 @propertycache
1793 @propertycache
1793 def _flagfunc(self):
1794 def _flagfunc(self):
1794 def f(path):
1795 def f(path):
1795 return self._cache[path]['flags']
1796 return self._cache[path]['flags']
1796 return f
1797 return f
1797
1798
1798 def files(self):
1799 def files(self):
1799 return sorted(self.added() + self.modified() + self.removed())
1800 return sorted(self.added() + self.modified() + self.removed())
1800
1801
1801 def modified(self):
1802 def modified(self):
1802 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1803 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1803 self._existsinparent(f)]
1804 self._existsinparent(f)]
1804
1805
1805 def added(self):
1806 def added(self):
1806 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1807 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1807 not self._existsinparent(f)]
1808 not self._existsinparent(f)]
1808
1809
1809 def removed(self):
1810 def removed(self):
1810 return [f for f in self._cache.keys() if
1811 return [f for f in self._cache.keys() if
1811 not self._cache[f]['exists'] and self._existsinparent(f)]
1812 not self._cache[f]['exists'] and self._existsinparent(f)]
1812
1813
1813 def isinmemory(self):
1814 def isinmemory(self):
1814 return True
1815 return True
1815
1816
1816 def filedate(self, path):
1817 def filedate(self, path):
1817 if self.isdirty(path):
1818 if self.isdirty(path):
1818 return self._cache[path]['date']
1819 return self._cache[path]['date']
1819 else:
1820 else:
1820 return self._wrappedctx[path].date()
1821 return self._wrappedctx[path].date()
1821
1822
1822 def markcopied(self, path, origin):
1823 def markcopied(self, path, origin):
1823 if self.isdirty(path):
1824 if self.isdirty(path):
1824 self._cache[path]['copied'] = origin
1825 self._cache[path]['copied'] = origin
1825 else:
1826 else:
1826 raise error.ProgrammingError('markcopied() called on clean context')
1827 raise error.ProgrammingError('markcopied() called on clean context')
1827
1828
1828 def copydata(self, path):
1829 def copydata(self, path):
1829 if self.isdirty(path):
1830 if self.isdirty(path):
1830 return self._cache[path]['copied']
1831 return self._cache[path]['copied']
1831 else:
1832 else:
1832 raise error.ProgrammingError('copydata() called on clean context')
1833 raise error.ProgrammingError('copydata() called on clean context')
1833
1834
1834 def flags(self, path):
1835 def flags(self, path):
1835 if self.isdirty(path):
1836 if self.isdirty(path):
1836 if self._cache[path]['exists']:
1837 if self._cache[path]['exists']:
1837 return self._cache[path]['flags']
1838 return self._cache[path]['flags']
1838 else:
1839 else:
1839 raise error.ProgrammingError("No such file or directory: %s" %
1840 raise error.ProgrammingError("No such file or directory: %s" %
1840 self._path)
1841 self._path)
1841 else:
1842 else:
1842 return self._wrappedctx[path].flags()
1843 return self._wrappedctx[path].flags()
1843
1844
1844 def __contains__(self, key):
1845 def __contains__(self, key):
1845 if key in self._cache:
1846 if key in self._cache:
1846 return self._cache[key]['exists']
1847 return self._cache[key]['exists']
1847 return key in self.p1()
1848 return key in self.p1()
1848
1849
1849 def _existsinparent(self, path):
1850 def _existsinparent(self, path):
1850 try:
1851 try:
1851 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1852 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1852 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1853 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1853 # with an ``exists()`` function.
1854 # with an ``exists()`` function.
1854 self._wrappedctx[path]
1855 self._wrappedctx[path]
1855 return True
1856 return True
1856 except error.ManifestLookupError:
1857 except error.ManifestLookupError:
1857 return False
1858 return False
1858
1859
1859 def _auditconflicts(self, path):
1860 def _auditconflicts(self, path):
1860 """Replicates conflict checks done by wvfs.write().
1861 """Replicates conflict checks done by wvfs.write().
1861
1862
1862 Since we never write to the filesystem and never call `applyupdates` in
1863 Since we never write to the filesystem and never call `applyupdates` in
1863 IMM, we'll never check that a path is actually writable -- e.g., because
1864 IMM, we'll never check that a path is actually writable -- e.g., because
1864 it adds `a/foo`, but `a` is actually a file in the other commit.
1865 it adds `a/foo`, but `a` is actually a file in the other commit.
1865 """
1866 """
1866 def fail(path, component):
1867 def fail(path, component):
1867 # p1() is the base and we're receiving "writes" for p2()'s
1868 # p1() is the base and we're receiving "writes" for p2()'s
1868 # files.
1869 # files.
1869 if 'l' in self.p1()[component].flags():
1870 if 'l' in self.p1()[component].flags():
1870 raise error.Abort("error: %s conflicts with symlink %s "
1871 raise error.Abort("error: %s conflicts with symlink %s "
1871 "in %d." % (path, component,
1872 "in %d." % (path, component,
1872 self.p1().rev()))
1873 self.p1().rev()))
1873 else:
1874 else:
1874 raise error.Abort("error: '%s' conflicts with file '%s' in "
1875 raise error.Abort("error: '%s' conflicts with file '%s' in "
1875 "%d." % (path, component,
1876 "%d." % (path, component,
1876 self.p1().rev()))
1877 self.p1().rev()))
1877
1878
1878 # Test that each new directory to be created to write this path from p2
1879 # Test that each new directory to be created to write this path from p2
1879 # is not a file in p1.
1880 # is not a file in p1.
1880 components = path.split('/')
1881 components = path.split('/')
1881 for i in pycompat.xrange(len(components)):
1882 for i in pycompat.xrange(len(components)):
1882 component = "/".join(components[0:i])
1883 component = "/".join(components[0:i])
1883 if component in self:
1884 if component in self:
1884 fail(path, component)
1885 fail(path, component)
1885
1886
1886 # Test the other direction -- that this path from p2 isn't a directory
1887 # Test the other direction -- that this path from p2 isn't a directory
1887 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1888 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1888 match = self.match(pats=[path + '/'], default=b'path')
1889 match = self.match(pats=[path + '/'], default=b'path')
1889 matches = self.p1().manifest().matches(match)
1890 matches = self.p1().manifest().matches(match)
1890 mfiles = matches.keys()
1891 mfiles = matches.keys()
1891 if len(mfiles) > 0:
1892 if len(mfiles) > 0:
1892 if len(mfiles) == 1 and mfiles[0] == path:
1893 if len(mfiles) == 1 and mfiles[0] == path:
1893 return
1894 return
1894 # omit the files which are deleted in current IMM wctx
1895 # omit the files which are deleted in current IMM wctx
1895 mfiles = [m for m in mfiles if m in self]
1896 mfiles = [m for m in mfiles if m in self]
1896 if not mfiles:
1897 if not mfiles:
1897 return
1898 return
1898 raise error.Abort("error: file '%s' cannot be written because "
1899 raise error.Abort("error: file '%s' cannot be written because "
1899 " '%s/' is a folder in %s (containing %d "
1900 " '%s/' is a folder in %s (containing %d "
1900 "entries: %s)"
1901 "entries: %s)"
1901 % (path, path, self.p1(), len(mfiles),
1902 % (path, path, self.p1(), len(mfiles),
1902 ', '.join(mfiles)))
1903 ', '.join(mfiles)))
1903
1904
1904 def write(self, path, data, flags='', **kwargs):
1905 def write(self, path, data, flags='', **kwargs):
1905 if data is None:
1906 if data is None:
1906 raise error.ProgrammingError("data must be non-None")
1907 raise error.ProgrammingError("data must be non-None")
1907 self._auditconflicts(path)
1908 self._auditconflicts(path)
1908 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1909 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1909 flags=flags)
1910 flags=flags)
1910
1911
1911 def setflags(self, path, l, x):
1912 def setflags(self, path, l, x):
1912 flag = ''
1913 flag = ''
1913 if l:
1914 if l:
1914 flag = 'l'
1915 flag = 'l'
1915 elif x:
1916 elif x:
1916 flag = 'x'
1917 flag = 'x'
1917 self._markdirty(path, exists=True, date=dateutil.makedate(),
1918 self._markdirty(path, exists=True, date=dateutil.makedate(),
1918 flags=flag)
1919 flags=flag)
1919
1920
1920 def remove(self, path):
1921 def remove(self, path):
1921 self._markdirty(path, exists=False)
1922 self._markdirty(path, exists=False)
1922
1923
1923 def exists(self, path):
1924 def exists(self, path):
1924 """exists behaves like `lexists`, but needs to follow symlinks and
1925 """exists behaves like `lexists`, but needs to follow symlinks and
1925 return False if they are broken.
1926 return False if they are broken.
1926 """
1927 """
1927 if self.isdirty(path):
1928 if self.isdirty(path):
1928 # If this path exists and is a symlink, "follow" it by calling
1929 # If this path exists and is a symlink, "follow" it by calling
1929 # exists on the destination path.
1930 # exists on the destination path.
1930 if (self._cache[path]['exists'] and
1931 if (self._cache[path]['exists'] and
1931 'l' in self._cache[path]['flags']):
1932 'l' in self._cache[path]['flags']):
1932 return self.exists(self._cache[path]['data'].strip())
1933 return self.exists(self._cache[path]['data'].strip())
1933 else:
1934 else:
1934 return self._cache[path]['exists']
1935 return self._cache[path]['exists']
1935
1936
1936 return self._existsinparent(path)
1937 return self._existsinparent(path)
1937
1938
1938 def lexists(self, path):
1939 def lexists(self, path):
1939 """lexists returns True if the path exists"""
1940 """lexists returns True if the path exists"""
1940 if self.isdirty(path):
1941 if self.isdirty(path):
1941 return self._cache[path]['exists']
1942 return self._cache[path]['exists']
1942
1943
1943 return self._existsinparent(path)
1944 return self._existsinparent(path)
1944
1945
1945 def size(self, path):
1946 def size(self, path):
1946 if self.isdirty(path):
1947 if self.isdirty(path):
1947 if self._cache[path]['exists']:
1948 if self._cache[path]['exists']:
1948 return len(self._cache[path]['data'])
1949 return len(self._cache[path]['data'])
1949 else:
1950 else:
1950 raise error.ProgrammingError("No such file or directory: %s" %
1951 raise error.ProgrammingError("No such file or directory: %s" %
1951 self._path)
1952 self._path)
1952 return self._wrappedctx[path].size()
1953 return self._wrappedctx[path].size()
1953
1954
1954 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1955 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1955 user=None, editor=None):
1956 user=None, editor=None):
1956 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1957 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1957 committed.
1958 committed.
1958
1959
1959 ``text`` is the commit message.
1960 ``text`` is the commit message.
1960 ``parents`` (optional) are rev numbers.
1961 ``parents`` (optional) are rev numbers.
1961 """
1962 """
1962 # Default parents to the wrapped contexts' if not passed.
1963 # Default parents to the wrapped contexts' if not passed.
1963 if parents is None:
1964 if parents is None:
1964 parents = self._wrappedctx.parents()
1965 parents = self._wrappedctx.parents()
1965 if len(parents) == 1:
1966 if len(parents) == 1:
1966 parents = (parents[0], None)
1967 parents = (parents[0], None)
1967
1968
1968 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1969 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1969 if parents[1] is None:
1970 if parents[1] is None:
1970 parents = (self._repo[parents[0]], None)
1971 parents = (self._repo[parents[0]], None)
1971 else:
1972 else:
1972 parents = (self._repo[parents[0]], self._repo[parents[1]])
1973 parents = (self._repo[parents[0]], self._repo[parents[1]])
1973
1974
1974 files = self._cache.keys()
1975 files = self._cache.keys()
1975 def getfile(repo, memctx, path):
1976 def getfile(repo, memctx, path):
1976 if self._cache[path]['exists']:
1977 if self._cache[path]['exists']:
1977 return memfilectx(repo, memctx, path,
1978 return memfilectx(repo, memctx, path,
1978 self._cache[path]['data'],
1979 self._cache[path]['data'],
1979 'l' in self._cache[path]['flags'],
1980 'l' in self._cache[path]['flags'],
1980 'x' in self._cache[path]['flags'],
1981 'x' in self._cache[path]['flags'],
1981 self._cache[path]['copied'])
1982 self._cache[path]['copied'])
1982 else:
1983 else:
1983 # Returning None, but including the path in `files`, is
1984 # Returning None, but including the path in `files`, is
1984 # necessary for memctx to register a deletion.
1985 # necessary for memctx to register a deletion.
1985 return None
1986 return None
1986 return memctx(self._repo, parents, text, files, getfile, date=date,
1987 return memctx(self._repo, parents, text, files, getfile, date=date,
1987 extra=extra, user=user, branch=branch, editor=editor)
1988 extra=extra, user=user, branch=branch, editor=editor)
1988
1989
1989 def isdirty(self, path):
1990 def isdirty(self, path):
1990 return path in self._cache
1991 return path in self._cache
1991
1992
1992 def isempty(self):
1993 def isempty(self):
1993 # We need to discard any keys that are actually clean before the empty
1994 # We need to discard any keys that are actually clean before the empty
1994 # commit check.
1995 # commit check.
1995 self._compact()
1996 self._compact()
1996 return len(self._cache) == 0
1997 return len(self._cache) == 0
1997
1998
1998 def clean(self):
1999 def clean(self):
1999 self._cache = {}
2000 self._cache = {}
2000
2001
2001 def _compact(self):
2002 def _compact(self):
2002 """Removes keys from the cache that are actually clean, by comparing
2003 """Removes keys from the cache that are actually clean, by comparing
2003 them with the underlying context.
2004 them with the underlying context.
2004
2005
2005 This can occur during the merge process, e.g. by passing --tool :local
2006 This can occur during the merge process, e.g. by passing --tool :local
2006 to resolve a conflict.
2007 to resolve a conflict.
2007 """
2008 """
2008 keys = []
2009 keys = []
2009 # This won't be perfect, but can help performance significantly when
2010 # This won't be perfect, but can help performance significantly when
2010 # using things like remotefilelog.
2011 # using things like remotefilelog.
2011 scmutil.prefetchfiles(
2012 scmutil.prefetchfiles(
2012 self.repo(), [self.p1().rev()],
2013 self.repo(), [self.p1().rev()],
2013 scmutil.matchfiles(self.repo(), self._cache.keys()))
2014 scmutil.matchfiles(self.repo(), self._cache.keys()))
2014
2015
2015 for path in self._cache.keys():
2016 for path in self._cache.keys():
2016 cache = self._cache[path]
2017 cache = self._cache[path]
2017 try:
2018 try:
2018 underlying = self._wrappedctx[path]
2019 underlying = self._wrappedctx[path]
2019 if (underlying.data() == cache['data'] and
2020 if (underlying.data() == cache['data'] and
2020 underlying.flags() == cache['flags']):
2021 underlying.flags() == cache['flags']):
2021 keys.append(path)
2022 keys.append(path)
2022 except error.ManifestLookupError:
2023 except error.ManifestLookupError:
2023 # Path not in the underlying manifest (created).
2024 # Path not in the underlying manifest (created).
2024 continue
2025 continue
2025
2026
2026 for path in keys:
2027 for path in keys:
2027 del self._cache[path]
2028 del self._cache[path]
2028 return keys
2029 return keys
2029
2030
2030 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2031 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2031 # data not provided, let's see if we already have some; if not, let's
2032 # data not provided, let's see if we already have some; if not, let's
2032 # grab it from our underlying context, so that we always have data if
2033 # grab it from our underlying context, so that we always have data if
2033 # the file is marked as existing.
2034 # the file is marked as existing.
2034 if exists and data is None:
2035 if exists and data is None:
2035 oldentry = self._cache.get(path) or {}
2036 oldentry = self._cache.get(path) or {}
2036 data = oldentry.get('data') or self._wrappedctx[path].data()
2037 data = oldentry.get('data') or self._wrappedctx[path].data()
2037
2038
2038 self._cache[path] = {
2039 self._cache[path] = {
2039 'exists': exists,
2040 'exists': exists,
2040 'data': data,
2041 'data': data,
2041 'date': date,
2042 'date': date,
2042 'flags': flags,
2043 'flags': flags,
2043 'copied': None,
2044 'copied': None,
2044 }
2045 }
2045
2046
2046 def filectx(self, path, filelog=None):
2047 def filectx(self, path, filelog=None):
2047 return overlayworkingfilectx(self._repo, path, parent=self,
2048 return overlayworkingfilectx(self._repo, path, parent=self,
2048 filelog=filelog)
2049 filelog=filelog)
2049
2050
2050 class overlayworkingfilectx(committablefilectx):
2051 class overlayworkingfilectx(committablefilectx):
2051 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2052 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2052 cache, which can be flushed through later by calling ``flush()``."""
2053 cache, which can be flushed through later by calling ``flush()``."""
2053
2054
2054 def __init__(self, repo, path, filelog=None, parent=None):
2055 def __init__(self, repo, path, filelog=None, parent=None):
2055 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2056 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2056 parent)
2057 parent)
2057 self._repo = repo
2058 self._repo = repo
2058 self._parent = parent
2059 self._parent = parent
2059 self._path = path
2060 self._path = path
2060
2061
2061 def cmp(self, fctx):
2062 def cmp(self, fctx):
2062 return self.data() != fctx.data()
2063 return self.data() != fctx.data()
2063
2064
2064 def changectx(self):
2065 def changectx(self):
2065 return self._parent
2066 return self._parent
2066
2067
2067 def data(self):
2068 def data(self):
2068 return self._parent.data(self._path)
2069 return self._parent.data(self._path)
2069
2070
2070 def date(self):
2071 def date(self):
2071 return self._parent.filedate(self._path)
2072 return self._parent.filedate(self._path)
2072
2073
2073 def exists(self):
2074 def exists(self):
2074 return self.lexists()
2075 return self.lexists()
2075
2076
2076 def lexists(self):
2077 def lexists(self):
2077 return self._parent.exists(self._path)
2078 return self._parent.exists(self._path)
2078
2079
2079 def renamed(self):
2080 def renamed(self):
2080 path = self._parent.copydata(self._path)
2081 path = self._parent.copydata(self._path)
2081 if not path:
2082 if not path:
2082 return None
2083 return None
2083 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2084 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2084
2085
2085 def size(self):
2086 def size(self):
2086 return self._parent.size(self._path)
2087 return self._parent.size(self._path)
2087
2088
2088 def markcopied(self, origin):
2089 def markcopied(self, origin):
2089 self._parent.markcopied(self._path, origin)
2090 self._parent.markcopied(self._path, origin)
2090
2091
2091 def audit(self):
2092 def audit(self):
2092 pass
2093 pass
2093
2094
2094 def flags(self):
2095 def flags(self):
2095 return self._parent.flags(self._path)
2096 return self._parent.flags(self._path)
2096
2097
2097 def setflags(self, islink, isexec):
2098 def setflags(self, islink, isexec):
2098 return self._parent.setflags(self._path, islink, isexec)
2099 return self._parent.setflags(self._path, islink, isexec)
2099
2100
2100 def write(self, data, flags, backgroundclose=False, **kwargs):
2101 def write(self, data, flags, backgroundclose=False, **kwargs):
2101 return self._parent.write(self._path, data, flags, **kwargs)
2102 return self._parent.write(self._path, data, flags, **kwargs)
2102
2103
2103 def remove(self, ignoremissing=False):
2104 def remove(self, ignoremissing=False):
2104 return self._parent.remove(self._path)
2105 return self._parent.remove(self._path)
2105
2106
2106 def clearunknown(self):
2107 def clearunknown(self):
2107 pass
2108 pass
2108
2109
2109 class workingcommitctx(workingctx):
2110 class workingcommitctx(workingctx):
2110 """A workingcommitctx object makes access to data related to
2111 """A workingcommitctx object makes access to data related to
2111 the revision being committed convenient.
2112 the revision being committed convenient.
2112
2113
2113 This hides changes in the working directory, if they aren't
2114 This hides changes in the working directory, if they aren't
2114 committed in this context.
2115 committed in this context.
2115 """
2116 """
2116 def __init__(self, repo, changes,
2117 def __init__(self, repo, changes,
2117 text="", user=None, date=None, extra=None):
2118 text="", user=None, date=None, extra=None):
2118 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2119 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2119 changes)
2120 changes)
2120
2121
2121 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2122 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2122 """Return matched files only in ``self._status``
2123 """Return matched files only in ``self._status``
2123
2124
2124 Uncommitted files appear "clean" via this context, even if
2125 Uncommitted files appear "clean" via this context, even if
2125 they aren't actually so in the working directory.
2126 they aren't actually so in the working directory.
2126 """
2127 """
2127 if clean:
2128 if clean:
2128 clean = [f for f in self._manifest if f not in self._changedset]
2129 clean = [f for f in self._manifest if f not in self._changedset]
2129 else:
2130 else:
2130 clean = []
2131 clean = []
2131 return scmutil.status([f for f in self._status.modified if match(f)],
2132 return scmutil.status([f for f in self._status.modified if match(f)],
2132 [f for f in self._status.added if match(f)],
2133 [f for f in self._status.added if match(f)],
2133 [f for f in self._status.removed if match(f)],
2134 [f for f in self._status.removed if match(f)],
2134 [], [], [], clean)
2135 [], [], [], clean)
2135
2136
2136 @propertycache
2137 @propertycache
2137 def _changedset(self):
2138 def _changedset(self):
2138 """Return the set of files changed in this context
2139 """Return the set of files changed in this context
2139 """
2140 """
2140 changed = set(self._status.modified)
2141 changed = set(self._status.modified)
2141 changed.update(self._status.added)
2142 changed.update(self._status.added)
2142 changed.update(self._status.removed)
2143 changed.update(self._status.removed)
2143 return changed
2144 return changed
2144
2145
2145 def makecachingfilectxfn(func):
2146 def makecachingfilectxfn(func):
2146 """Create a filectxfn that caches based on the path.
2147 """Create a filectxfn that caches based on the path.
2147
2148
2148 We can't use util.cachefunc because it uses all arguments as the cache
2149 We can't use util.cachefunc because it uses all arguments as the cache
2149 key and this creates a cycle since the arguments include the repo and
2150 key and this creates a cycle since the arguments include the repo and
2150 memctx.
2151 memctx.
2151 """
2152 """
2152 cache = {}
2153 cache = {}
2153
2154
2154 def getfilectx(repo, memctx, path):
2155 def getfilectx(repo, memctx, path):
2155 if path not in cache:
2156 if path not in cache:
2156 cache[path] = func(repo, memctx, path)
2157 cache[path] = func(repo, memctx, path)
2157 return cache[path]
2158 return cache[path]
2158
2159
2159 return getfilectx
2160 return getfilectx
2160
2161
2161 def memfilefromctx(ctx):
2162 def memfilefromctx(ctx):
2162 """Given a context return a memfilectx for ctx[path]
2163 """Given a context return a memfilectx for ctx[path]
2163
2164
2164 This is a convenience method for building a memctx based on another
2165 This is a convenience method for building a memctx based on another
2165 context.
2166 context.
2166 """
2167 """
2167 def getfilectx(repo, memctx, path):
2168 def getfilectx(repo, memctx, path):
2168 fctx = ctx[path]
2169 fctx = ctx[path]
2169 copied = fctx.renamed()
2170 copied = fctx.renamed()
2170 if copied:
2171 if copied:
2171 copied = copied[0]
2172 copied = copied[0]
2172 return memfilectx(repo, memctx, path, fctx.data(),
2173 return memfilectx(repo, memctx, path, fctx.data(),
2173 islink=fctx.islink(), isexec=fctx.isexec(),
2174 islink=fctx.islink(), isexec=fctx.isexec(),
2174 copied=copied)
2175 copied=copied)
2175
2176
2176 return getfilectx
2177 return getfilectx
2177
2178
2178 def memfilefrompatch(patchstore):
2179 def memfilefrompatch(patchstore):
2179 """Given a patch (e.g. patchstore object) return a memfilectx
2180 """Given a patch (e.g. patchstore object) return a memfilectx
2180
2181
2181 This is a convenience method for building a memctx based on a patchstore.
2182 This is a convenience method for building a memctx based on a patchstore.
2182 """
2183 """
2183 def getfilectx(repo, memctx, path):
2184 def getfilectx(repo, memctx, path):
2184 data, mode, copied = patchstore.getfile(path)
2185 data, mode, copied = patchstore.getfile(path)
2185 if data is None:
2186 if data is None:
2186 return None
2187 return None
2187 islink, isexec = mode
2188 islink, isexec = mode
2188 return memfilectx(repo, memctx, path, data, islink=islink,
2189 return memfilectx(repo, memctx, path, data, islink=islink,
2189 isexec=isexec, copied=copied)
2190 isexec=isexec, copied=copied)
2190
2191
2191 return getfilectx
2192 return getfilectx
2192
2193
2193 class memctx(committablectx):
2194 class memctx(committablectx):
2194 """Use memctx to perform in-memory commits via localrepo.commitctx().
2195 """Use memctx to perform in-memory commits via localrepo.commitctx().
2195
2196
2196 Revision information is supplied at initialization time while
2197 Revision information is supplied at initialization time while
2197 related files data and is made available through a callback
2198 related files data and is made available through a callback
2198 mechanism. 'repo' is the current localrepo, 'parents' is a
2199 mechanism. 'repo' is the current localrepo, 'parents' is a
2199 sequence of two parent revisions identifiers (pass None for every
2200 sequence of two parent revisions identifiers (pass None for every
2200 missing parent), 'text' is the commit message and 'files' lists
2201 missing parent), 'text' is the commit message and 'files' lists
2201 names of files touched by the revision (normalized and relative to
2202 names of files touched by the revision (normalized and relative to
2202 repository root).
2203 repository root).
2203
2204
2204 filectxfn(repo, memctx, path) is a callable receiving the
2205 filectxfn(repo, memctx, path) is a callable receiving the
2205 repository, the current memctx object and the normalized path of
2206 repository, the current memctx object and the normalized path of
2206 requested file, relative to repository root. It is fired by the
2207 requested file, relative to repository root. It is fired by the
2207 commit function for every file in 'files', but calls order is
2208 commit function for every file in 'files', but calls order is
2208 undefined. If the file is available in the revision being
2209 undefined. If the file is available in the revision being
2209 committed (updated or added), filectxfn returns a memfilectx
2210 committed (updated or added), filectxfn returns a memfilectx
2210 object. If the file was removed, filectxfn return None for recent
2211 object. If the file was removed, filectxfn return None for recent
2211 Mercurial. Moved files are represented by marking the source file
2212 Mercurial. Moved files are represented by marking the source file
2212 removed and the new file added with copy information (see
2213 removed and the new file added with copy information (see
2213 memfilectx).
2214 memfilectx).
2214
2215
2215 user receives the committer name and defaults to current
2216 user receives the committer name and defaults to current
2216 repository username, date is the commit date in any format
2217 repository username, date is the commit date in any format
2217 supported by dateutil.parsedate() and defaults to current date, extra
2218 supported by dateutil.parsedate() and defaults to current date, extra
2218 is a dictionary of metadata or is left empty.
2219 is a dictionary of metadata or is left empty.
2219 """
2220 """
2220
2221
2221 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2222 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2222 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2223 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2223 # this field to determine what to do in filectxfn.
2224 # this field to determine what to do in filectxfn.
2224 _returnnoneformissingfiles = True
2225 _returnnoneformissingfiles = True
2225
2226
2226 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2227 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2227 date=None, extra=None, branch=None, editor=False):
2228 date=None, extra=None, branch=None, editor=False):
2228 super(memctx, self).__init__(repo, text, user, date, extra)
2229 super(memctx, self).__init__(repo, text, user, date, extra)
2229 self._rev = None
2230 self._rev = None
2230 self._node = None
2231 self._node = None
2231 parents = [(p or nullid) for p in parents]
2232 parents = [(p or nullid) for p in parents]
2232 p1, p2 = parents
2233 p1, p2 = parents
2233 self._parents = [self._repo[p] for p in (p1, p2)]
2234 self._parents = [self._repo[p] for p in (p1, p2)]
2234 files = sorted(set(files))
2235 files = sorted(set(files))
2235 self._files = files
2236 self._files = files
2236 if branch is not None:
2237 if branch is not None:
2237 self._extra['branch'] = encoding.fromlocal(branch)
2238 self._extra['branch'] = encoding.fromlocal(branch)
2238 self.substate = {}
2239 self.substate = {}
2239
2240
2240 if isinstance(filectxfn, patch.filestore):
2241 if isinstance(filectxfn, patch.filestore):
2241 filectxfn = memfilefrompatch(filectxfn)
2242 filectxfn = memfilefrompatch(filectxfn)
2242 elif not callable(filectxfn):
2243 elif not callable(filectxfn):
2243 # if store is not callable, wrap it in a function
2244 # if store is not callable, wrap it in a function
2244 filectxfn = memfilefromctx(filectxfn)
2245 filectxfn = memfilefromctx(filectxfn)
2245
2246
2246 # memoizing increases performance for e.g. vcs convert scenarios.
2247 # memoizing increases performance for e.g. vcs convert scenarios.
2247 self._filectxfn = makecachingfilectxfn(filectxfn)
2248 self._filectxfn = makecachingfilectxfn(filectxfn)
2248
2249
2249 if editor:
2250 if editor:
2250 self._text = editor(self._repo, self, [])
2251 self._text = editor(self._repo, self, [])
2251 self._repo.savecommitmessage(self._text)
2252 self._repo.savecommitmessage(self._text)
2252
2253
2253 def filectx(self, path, filelog=None):
2254 def filectx(self, path, filelog=None):
2254 """get a file context from the working directory
2255 """get a file context from the working directory
2255
2256
2256 Returns None if file doesn't exist and should be removed."""
2257 Returns None if file doesn't exist and should be removed."""
2257 return self._filectxfn(self._repo, self, path)
2258 return self._filectxfn(self._repo, self, path)
2258
2259
2259 def commit(self):
2260 def commit(self):
2260 """commit context to the repo"""
2261 """commit context to the repo"""
2261 return self._repo.commitctx(self)
2262 return self._repo.commitctx(self)
2262
2263
2263 @propertycache
2264 @propertycache
2264 def _manifest(self):
2265 def _manifest(self):
2265 """generate a manifest based on the return values of filectxfn"""
2266 """generate a manifest based on the return values of filectxfn"""
2266
2267
2267 # keep this simple for now; just worry about p1
2268 # keep this simple for now; just worry about p1
2268 pctx = self._parents[0]
2269 pctx = self._parents[0]
2269 man = pctx.manifest().copy()
2270 man = pctx.manifest().copy()
2270
2271
2271 for f in self._status.modified:
2272 for f in self._status.modified:
2272 man[f] = modifiednodeid
2273 man[f] = modifiednodeid
2273
2274
2274 for f in self._status.added:
2275 for f in self._status.added:
2275 man[f] = addednodeid
2276 man[f] = addednodeid
2276
2277
2277 for f in self._status.removed:
2278 for f in self._status.removed:
2278 if f in man:
2279 if f in man:
2279 del man[f]
2280 del man[f]
2280
2281
2281 return man
2282 return man
2282
2283
2283 @propertycache
2284 @propertycache
2284 def _status(self):
2285 def _status(self):
2285 """Calculate exact status from ``files`` specified at construction
2286 """Calculate exact status from ``files`` specified at construction
2286 """
2287 """
2287 man1 = self.p1().manifest()
2288 man1 = self.p1().manifest()
2288 p2 = self._parents[1]
2289 p2 = self._parents[1]
2289 # "1 < len(self._parents)" can't be used for checking
2290 # "1 < len(self._parents)" can't be used for checking
2290 # existence of the 2nd parent, because "memctx._parents" is
2291 # existence of the 2nd parent, because "memctx._parents" is
2291 # explicitly initialized by the list, of which length is 2.
2292 # explicitly initialized by the list, of which length is 2.
2292 if p2.node() != nullid:
2293 if p2.node() != nullid:
2293 man2 = p2.manifest()
2294 man2 = p2.manifest()
2294 managing = lambda f: f in man1 or f in man2
2295 managing = lambda f: f in man1 or f in man2
2295 else:
2296 else:
2296 managing = lambda f: f in man1
2297 managing = lambda f: f in man1
2297
2298
2298 modified, added, removed = [], [], []
2299 modified, added, removed = [], [], []
2299 for f in self._files:
2300 for f in self._files:
2300 if not managing(f):
2301 if not managing(f):
2301 added.append(f)
2302 added.append(f)
2302 elif self[f]:
2303 elif self[f]:
2303 modified.append(f)
2304 modified.append(f)
2304 else:
2305 else:
2305 removed.append(f)
2306 removed.append(f)
2306
2307
2307 return scmutil.status(modified, added, removed, [], [], [], [])
2308 return scmutil.status(modified, added, removed, [], [], [], [])
2308
2309
2309 class memfilectx(committablefilectx):
2310 class memfilectx(committablefilectx):
2310 """memfilectx represents an in-memory file to commit.
2311 """memfilectx represents an in-memory file to commit.
2311
2312
2312 See memctx and committablefilectx for more details.
2313 See memctx and committablefilectx for more details.
2313 """
2314 """
2314 def __init__(self, repo, changectx, path, data, islink=False,
2315 def __init__(self, repo, changectx, path, data, islink=False,
2315 isexec=False, copied=None):
2316 isexec=False, copied=None):
2316 """
2317 """
2317 path is the normalized file path relative to repository root.
2318 path is the normalized file path relative to repository root.
2318 data is the file content as a string.
2319 data is the file content as a string.
2319 islink is True if the file is a symbolic link.
2320 islink is True if the file is a symbolic link.
2320 isexec is True if the file is executable.
2321 isexec is True if the file is executable.
2321 copied is the source file path if current file was copied in the
2322 copied is the source file path if current file was copied in the
2322 revision being committed, or None."""
2323 revision being committed, or None."""
2323 super(memfilectx, self).__init__(repo, path, None, changectx)
2324 super(memfilectx, self).__init__(repo, path, None, changectx)
2324 self._data = data
2325 self._data = data
2325 if islink:
2326 if islink:
2326 self._flags = 'l'
2327 self._flags = 'l'
2327 elif isexec:
2328 elif isexec:
2328 self._flags = 'x'
2329 self._flags = 'x'
2329 else:
2330 else:
2330 self._flags = ''
2331 self._flags = ''
2331 self._copied = None
2332 self._copied = None
2332 if copied:
2333 if copied:
2333 self._copied = (copied, nullid)
2334 self._copied = (copied, nullid)
2334
2335
2335 def cmp(self, fctx):
2336 def cmp(self, fctx):
2336 return self.data() != fctx.data()
2337 return self.data() != fctx.data()
2337
2338
2338 def data(self):
2339 def data(self):
2339 return self._data
2340 return self._data
2340
2341
2341 def remove(self, ignoremissing=False):
2342 def remove(self, ignoremissing=False):
2342 """wraps unlink for a repo's working directory"""
2343 """wraps unlink for a repo's working directory"""
2343 # need to figure out what to do here
2344 # need to figure out what to do here
2344 del self._changectx[self._path]
2345 del self._changectx[self._path]
2345
2346
2346 def write(self, data, flags, **kwargs):
2347 def write(self, data, flags, **kwargs):
2347 """wraps repo.wwrite"""
2348 """wraps repo.wwrite"""
2348 self._data = data
2349 self._data = data
2349
2350
2350
2351
2351 class metadataonlyctx(committablectx):
2352 class metadataonlyctx(committablectx):
2352 """Like memctx but it's reusing the manifest of different commit.
2353 """Like memctx but it's reusing the manifest of different commit.
2353 Intended to be used by lightweight operations that are creating
2354 Intended to be used by lightweight operations that are creating
2354 metadata-only changes.
2355 metadata-only changes.
2355
2356
2356 Revision information is supplied at initialization time. 'repo' is the
2357 Revision information is supplied at initialization time. 'repo' is the
2357 current localrepo, 'ctx' is original revision which manifest we're reuisng
2358 current localrepo, 'ctx' is original revision which manifest we're reuisng
2358 'parents' is a sequence of two parent revisions identifiers (pass None for
2359 'parents' is a sequence of two parent revisions identifiers (pass None for
2359 every missing parent), 'text' is the commit.
2360 every missing parent), 'text' is the commit.
2360
2361
2361 user receives the committer name and defaults to current repository
2362 user receives the committer name and defaults to current repository
2362 username, date is the commit date in any format supported by
2363 username, date is the commit date in any format supported by
2363 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2364 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2364 metadata or is left empty.
2365 metadata or is left empty.
2365 """
2366 """
2366 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2367 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2367 date=None, extra=None, editor=False):
2368 date=None, extra=None, editor=False):
2368 if text is None:
2369 if text is None:
2369 text = originalctx.description()
2370 text = originalctx.description()
2370 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2371 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2371 self._rev = None
2372 self._rev = None
2372 self._node = None
2373 self._node = None
2373 self._originalctx = originalctx
2374 self._originalctx = originalctx
2374 self._manifestnode = originalctx.manifestnode()
2375 self._manifestnode = originalctx.manifestnode()
2375 if parents is None:
2376 if parents is None:
2376 parents = originalctx.parents()
2377 parents = originalctx.parents()
2377 else:
2378 else:
2378 parents = [repo[p] for p in parents if p is not None]
2379 parents = [repo[p] for p in parents if p is not None]
2379 parents = parents[:]
2380 parents = parents[:]
2380 while len(parents) < 2:
2381 while len(parents) < 2:
2381 parents.append(repo[nullid])
2382 parents.append(repo[nullid])
2382 p1, p2 = self._parents = parents
2383 p1, p2 = self._parents = parents
2383
2384
2384 # sanity check to ensure that the reused manifest parents are
2385 # sanity check to ensure that the reused manifest parents are
2385 # manifests of our commit parents
2386 # manifests of our commit parents
2386 mp1, mp2 = self.manifestctx().parents
2387 mp1, mp2 = self.manifestctx().parents
2387 if p1 != nullid and p1.manifestnode() != mp1:
2388 if p1 != nullid and p1.manifestnode() != mp1:
2388 raise RuntimeError(r"can't reuse the manifest: its p1 "
2389 raise RuntimeError(r"can't reuse the manifest: its p1 "
2389 r"doesn't match the new ctx p1")
2390 r"doesn't match the new ctx p1")
2390 if p2 != nullid and p2.manifestnode() != mp2:
2391 if p2 != nullid and p2.manifestnode() != mp2:
2391 raise RuntimeError(r"can't reuse the manifest: "
2392 raise RuntimeError(r"can't reuse the manifest: "
2392 r"its p2 doesn't match the new ctx p2")
2393 r"its p2 doesn't match the new ctx p2")
2393
2394
2394 self._files = originalctx.files()
2395 self._files = originalctx.files()
2395 self.substate = {}
2396 self.substate = {}
2396
2397
2397 if editor:
2398 if editor:
2398 self._text = editor(self._repo, self, [])
2399 self._text = editor(self._repo, self, [])
2399 self._repo.savecommitmessage(self._text)
2400 self._repo.savecommitmessage(self._text)
2400
2401
2401 def manifestnode(self):
2402 def manifestnode(self):
2402 return self._manifestnode
2403 return self._manifestnode
2403
2404
2404 @property
2405 @property
2405 def _manifestctx(self):
2406 def _manifestctx(self):
2406 return self._repo.manifestlog[self._manifestnode]
2407 return self._repo.manifestlog[self._manifestnode]
2407
2408
2408 def filectx(self, path, filelog=None):
2409 def filectx(self, path, filelog=None):
2409 return self._originalctx.filectx(path, filelog=filelog)
2410 return self._originalctx.filectx(path, filelog=filelog)
2410
2411
2411 def commit(self):
2412 def commit(self):
2412 """commit context to the repo"""
2413 """commit context to the repo"""
2413 return self._repo.commitctx(self)
2414 return self._repo.commitctx(self)
2414
2415
2415 @property
2416 @property
2416 def _manifest(self):
2417 def _manifest(self):
2417 return self._originalctx.manifest()
2418 return self._originalctx.manifest()
2418
2419
2419 @propertycache
2420 @propertycache
2420 def _status(self):
2421 def _status(self):
2421 """Calculate exact status from ``files`` specified in the ``origctx``
2422 """Calculate exact status from ``files`` specified in the ``origctx``
2422 and parents manifests.
2423 and parents manifests.
2423 """
2424 """
2424 man1 = self.p1().manifest()
2425 man1 = self.p1().manifest()
2425 p2 = self._parents[1]
2426 p2 = self._parents[1]
2426 # "1 < len(self._parents)" can't be used for checking
2427 # "1 < len(self._parents)" can't be used for checking
2427 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2428 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2428 # explicitly initialized by the list, of which length is 2.
2429 # explicitly initialized by the list, of which length is 2.
2429 if p2.node() != nullid:
2430 if p2.node() != nullid:
2430 man2 = p2.manifest()
2431 man2 = p2.manifest()
2431 managing = lambda f: f in man1 or f in man2
2432 managing = lambda f: f in man1 or f in man2
2432 else:
2433 else:
2433 managing = lambda f: f in man1
2434 managing = lambda f: f in man1
2434
2435
2435 modified, added, removed = [], [], []
2436 modified, added, removed = [], [], []
2436 for f in self._files:
2437 for f in self._files:
2437 if not managing(f):
2438 if not managing(f):
2438 added.append(f)
2439 added.append(f)
2439 elif f in self:
2440 elif f in self:
2440 modified.append(f)
2441 modified.append(f)
2441 else:
2442 else:
2442 removed.append(f)
2443 removed.append(f)
2443
2444
2444 return scmutil.status(modified, added, removed, [], [], [], [])
2445 return scmutil.status(modified, added, removed, [], [], [], [])
2445
2446
2446 class arbitraryfilectx(object):
2447 class arbitraryfilectx(object):
2447 """Allows you to use filectx-like functions on a file in an arbitrary
2448 """Allows you to use filectx-like functions on a file in an arbitrary
2448 location on disk, possibly not in the working directory.
2449 location on disk, possibly not in the working directory.
2449 """
2450 """
2450 def __init__(self, path, repo=None):
2451 def __init__(self, path, repo=None):
2451 # Repo is optional because contrib/simplemerge uses this class.
2452 # Repo is optional because contrib/simplemerge uses this class.
2452 self._repo = repo
2453 self._repo = repo
2453 self._path = path
2454 self._path = path
2454
2455
2455 def cmp(self, fctx):
2456 def cmp(self, fctx):
2456 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2457 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2457 # path if either side is a symlink.
2458 # path if either side is a symlink.
2458 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2459 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2459 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2460 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2460 # Add a fast-path for merge if both sides are disk-backed.
2461 # Add a fast-path for merge if both sides are disk-backed.
2461 # Note that filecmp uses the opposite return values (True if same)
2462 # Note that filecmp uses the opposite return values (True if same)
2462 # from our cmp functions (True if different).
2463 # from our cmp functions (True if different).
2463 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2464 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2464 return self.data() != fctx.data()
2465 return self.data() != fctx.data()
2465
2466
2466 def path(self):
2467 def path(self):
2467 return self._path
2468 return self._path
2468
2469
2469 def flags(self):
2470 def flags(self):
2470 return ''
2471 return ''
2471
2472
2472 def data(self):
2473 def data(self):
2473 return util.readfile(self._path)
2474 return util.readfile(self._path)
2474
2475
2475 def decodeddata(self):
2476 def decodeddata(self):
2476 with open(self._path, "rb") as f:
2477 with open(self._path, "rb") as f:
2477 return f.read()
2478 return f.read()
2478
2479
2479 def remove(self):
2480 def remove(self):
2480 util.unlink(self._path)
2481 util.unlink(self._path)
2481
2482
2482 def write(self, data, flags, **kwargs):
2483 def write(self, data, flags, **kwargs):
2483 assert not flags
2484 assert not flags
2484 with open(self._path, "wb") as f:
2485 with open(self._path, "wb") as f:
2485 f.write(data)
2486 f.write(data)
@@ -1,916 +1,919 b''
1 # logcmdutil.py - utility for log-like commands
1 # logcmdutil.py - utility for log-like commands
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import itertools
10 import itertools
11 import os
11 import os
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 nullid,
15 nullid,
16 wdirid,
16 wdirid,
17 wdirrev,
17 wdirrev,
18 )
18 )
19
19
20 from . import (
20 from . import (
21 dagop,
21 dagop,
22 error,
22 error,
23 formatter,
23 formatter,
24 graphmod,
24 graphmod,
25 match as matchmod,
25 match as matchmod,
26 mdiff,
26 mdiff,
27 patch,
27 patch,
28 pathutil,
28 pathutil,
29 pycompat,
29 pycompat,
30 revset,
30 revset,
31 revsetlang,
31 revsetlang,
32 scmutil,
32 scmutil,
33 smartset,
33 smartset,
34 templatekw,
34 templatekw,
35 templater,
35 templater,
36 util,
36 util,
37 )
37 )
38 from .utils import (
38 from .utils import (
39 dateutil,
39 dateutil,
40 stringutil,
40 stringutil,
41 )
41 )
42
42
43 def getlimit(opts):
43 def getlimit(opts):
44 """get the log limit according to option -l/--limit"""
44 """get the log limit according to option -l/--limit"""
45 limit = opts.get('limit')
45 limit = opts.get('limit')
46 if limit:
46 if limit:
47 try:
47 try:
48 limit = int(limit)
48 limit = int(limit)
49 except ValueError:
49 except ValueError:
50 raise error.Abort(_('limit must be a positive integer'))
50 raise error.Abort(_('limit must be a positive integer'))
51 if limit <= 0:
51 if limit <= 0:
52 raise error.Abort(_('limit must be positive'))
52 raise error.Abort(_('limit must be positive'))
53 else:
53 else:
54 limit = None
54 limit = None
55 return limit
55 return limit
56
56
57 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
57 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
58 changes=None, stat=False, fp=None, graphwidth=0,
58 changes=None, stat=False, fp=None, graphwidth=0,
59 prefix='', root='', listsubrepos=False, hunksfilterfn=None):
59 prefix='', root='', listsubrepos=False, hunksfilterfn=None):
60 '''show diff or diffstat.'''
60 '''show diff or diffstat.'''
61 ctx1 = repo[node1]
61 ctx1 = repo[node1]
62 ctx2 = repo[node2]
62 ctx2 = repo[node2]
63 if root:
63 if root:
64 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
64 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
65 else:
65 else:
66 relroot = ''
66 relroot = ''
67 copysourcematch = None
67 if relroot != '':
68 if relroot != '':
68 # XXX relative roots currently don't work if the root is within a
69 # XXX relative roots currently don't work if the root is within a
69 # subrepo
70 # subrepo
70 uirelroot = match.uipath(relroot)
71 uirelroot = match.uipath(relroot)
71 relroot += '/'
72 relroot += '/'
72 for matchroot in match.files():
73 for matchroot in match.files():
73 if not matchroot.startswith(relroot):
74 if not matchroot.startswith(relroot):
74 ui.warn(_('warning: %s not inside relative root %s\n') % (
75 ui.warn(_('warning: %s not inside relative root %s\n') % (
75 match.uipath(matchroot), uirelroot))
76 match.uipath(matchroot), uirelroot))
76
77
77 relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
78 relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
78 match = matchmod.intersectmatchers(match, relrootmatch)
79 match = matchmod.intersectmatchers(match, relrootmatch)
80 copysourcematch = relrootmatch
79
81
80 if stat:
82 if stat:
81 diffopts = diffopts.copy(context=0, noprefix=False)
83 diffopts = diffopts.copy(context=0, noprefix=False)
82 width = 80
84 width = 80
83 if not ui.plain():
85 if not ui.plain():
84 width = ui.termwidth() - graphwidth
86 width = ui.termwidth() - graphwidth
85
87
86 chunks = ctx2.diff(ctx1, match, changes, opts=diffopts, prefix=prefix,
88 chunks = ctx2.diff(ctx1, match, changes, opts=diffopts, prefix=prefix,
87 relroot=relroot, hunksfilterfn=hunksfilterfn)
89 relroot=relroot, copysourcematch=copysourcematch,
90 hunksfilterfn=hunksfilterfn)
88
91
89 if fp is not None or ui.canwritewithoutlabels():
92 if fp is not None or ui.canwritewithoutlabels():
90 out = fp or ui
93 out = fp or ui
91 if stat:
94 if stat:
92 chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
95 chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
93 for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
96 for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
94 out.write(chunk)
97 out.write(chunk)
95 else:
98 else:
96 if stat:
99 if stat:
97 chunks = patch.diffstatui(util.iterlines(chunks), width=width)
100 chunks = patch.diffstatui(util.iterlines(chunks), width=width)
98 else:
101 else:
99 chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks,
102 chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks,
100 opts=diffopts)
103 opts=diffopts)
101 if ui.canbatchlabeledwrites():
104 if ui.canbatchlabeledwrites():
102 def gen():
105 def gen():
103 for chunk, label in chunks:
106 for chunk, label in chunks:
104 yield ui.label(chunk, label=label)
107 yield ui.label(chunk, label=label)
105 for chunk in util.filechunkiter(util.chunkbuffer(gen())):
108 for chunk in util.filechunkiter(util.chunkbuffer(gen())):
106 ui.write(chunk)
109 ui.write(chunk)
107 else:
110 else:
108 for chunk, label in chunks:
111 for chunk, label in chunks:
109 ui.write(chunk, label=label)
112 ui.write(chunk, label=label)
110
113
111 if listsubrepos:
114 if listsubrepos:
112 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
115 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
113 tempnode2 = node2
116 tempnode2 = node2
114 try:
117 try:
115 if node2 is not None:
118 if node2 is not None:
116 tempnode2 = ctx2.substate[subpath][1]
119 tempnode2 = ctx2.substate[subpath][1]
117 except KeyError:
120 except KeyError:
118 # A subrepo that existed in node1 was deleted between node1 and
121 # A subrepo that existed in node1 was deleted between node1 and
119 # node2 (inclusive). Thus, ctx2's substate won't contain that
122 # node2 (inclusive). Thus, ctx2's substate won't contain that
120 # subpath. The best we can do is to ignore it.
123 # subpath. The best we can do is to ignore it.
121 tempnode2 = None
124 tempnode2 = None
122 submatch = matchmod.subdirmatcher(subpath, match)
125 submatch = matchmod.subdirmatcher(subpath, match)
123 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
126 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
124 stat=stat, fp=fp, prefix=prefix)
127 stat=stat, fp=fp, prefix=prefix)
125
128
126 class changesetdiffer(object):
129 class changesetdiffer(object):
127 """Generate diff of changeset with pre-configured filtering functions"""
130 """Generate diff of changeset with pre-configured filtering functions"""
128
131
129 def _makefilematcher(self, ctx):
132 def _makefilematcher(self, ctx):
130 return scmutil.matchall(ctx.repo())
133 return scmutil.matchall(ctx.repo())
131
134
132 def _makehunksfilter(self, ctx):
135 def _makehunksfilter(self, ctx):
133 return None
136 return None
134
137
135 def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
138 def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
136 repo = ctx.repo()
139 repo = ctx.repo()
137 node = ctx.node()
140 node = ctx.node()
138 prev = ctx.p1().node()
141 prev = ctx.p1().node()
139 diffordiffstat(ui, repo, diffopts, prev, node,
142 diffordiffstat(ui, repo, diffopts, prev, node,
140 match=self._makefilematcher(ctx), stat=stat,
143 match=self._makefilematcher(ctx), stat=stat,
141 graphwidth=graphwidth,
144 graphwidth=graphwidth,
142 hunksfilterfn=self._makehunksfilter(ctx))
145 hunksfilterfn=self._makehunksfilter(ctx))
143
146
144 def changesetlabels(ctx):
147 def changesetlabels(ctx):
145 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
148 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
146 if ctx.obsolete():
149 if ctx.obsolete():
147 labels.append('changeset.obsolete')
150 labels.append('changeset.obsolete')
148 if ctx.isunstable():
151 if ctx.isunstable():
149 labels.append('changeset.unstable')
152 labels.append('changeset.unstable')
150 for instability in ctx.instabilities():
153 for instability in ctx.instabilities():
151 labels.append('instability.%s' % instability)
154 labels.append('instability.%s' % instability)
152 return ' '.join(labels)
155 return ' '.join(labels)
153
156
154 class changesetprinter(object):
157 class changesetprinter(object):
155 '''show changeset information when templating not requested.'''
158 '''show changeset information when templating not requested.'''
156
159
157 def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
160 def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
158 self.ui = ui
161 self.ui = ui
159 self.repo = repo
162 self.repo = repo
160 self.buffered = buffered
163 self.buffered = buffered
161 self._differ = differ or changesetdiffer()
164 self._differ = differ or changesetdiffer()
162 self._diffopts = patch.diffallopts(ui, diffopts)
165 self._diffopts = patch.diffallopts(ui, diffopts)
163 self._includestat = diffopts and diffopts.get('stat')
166 self._includestat = diffopts and diffopts.get('stat')
164 self._includediff = diffopts and diffopts.get('patch')
167 self._includediff = diffopts and diffopts.get('patch')
165 self.header = {}
168 self.header = {}
166 self.hunk = {}
169 self.hunk = {}
167 self.lastheader = None
170 self.lastheader = None
168 self.footer = None
171 self.footer = None
169 self._columns = templatekw.getlogcolumns()
172 self._columns = templatekw.getlogcolumns()
170
173
171 def flush(self, ctx):
174 def flush(self, ctx):
172 rev = ctx.rev()
175 rev = ctx.rev()
173 if rev in self.header:
176 if rev in self.header:
174 h = self.header[rev]
177 h = self.header[rev]
175 if h != self.lastheader:
178 if h != self.lastheader:
176 self.lastheader = h
179 self.lastheader = h
177 self.ui.write(h)
180 self.ui.write(h)
178 del self.header[rev]
181 del self.header[rev]
179 if rev in self.hunk:
182 if rev in self.hunk:
180 self.ui.write(self.hunk[rev])
183 self.ui.write(self.hunk[rev])
181 del self.hunk[rev]
184 del self.hunk[rev]
182
185
183 def close(self):
186 def close(self):
184 if self.footer:
187 if self.footer:
185 self.ui.write(self.footer)
188 self.ui.write(self.footer)
186
189
187 def show(self, ctx, copies=None, **props):
190 def show(self, ctx, copies=None, **props):
188 props = pycompat.byteskwargs(props)
191 props = pycompat.byteskwargs(props)
189 if self.buffered:
192 if self.buffered:
190 self.ui.pushbuffer(labeled=True)
193 self.ui.pushbuffer(labeled=True)
191 self._show(ctx, copies, props)
194 self._show(ctx, copies, props)
192 self.hunk[ctx.rev()] = self.ui.popbuffer()
195 self.hunk[ctx.rev()] = self.ui.popbuffer()
193 else:
196 else:
194 self._show(ctx, copies, props)
197 self._show(ctx, copies, props)
195
198
196 def _show(self, ctx, copies, props):
199 def _show(self, ctx, copies, props):
197 '''show a single changeset or file revision'''
200 '''show a single changeset or file revision'''
198 changenode = ctx.node()
201 changenode = ctx.node()
199 graphwidth = props.get('graphwidth', 0)
202 graphwidth = props.get('graphwidth', 0)
200
203
201 if self.ui.quiet:
204 if self.ui.quiet:
202 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
205 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
203 label='log.node')
206 label='log.node')
204 return
207 return
205
208
206 columns = self._columns
209 columns = self._columns
207 self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
210 self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
208 label=changesetlabels(ctx))
211 label=changesetlabels(ctx))
209
212
210 # branches are shown first before any other names due to backwards
213 # branches are shown first before any other names due to backwards
211 # compatibility
214 # compatibility
212 branch = ctx.branch()
215 branch = ctx.branch()
213 # don't show the default branch name
216 # don't show the default branch name
214 if branch != 'default':
217 if branch != 'default':
215 self.ui.write(columns['branch'] % branch, label='log.branch')
218 self.ui.write(columns['branch'] % branch, label='log.branch')
216
219
217 for nsname, ns in self.repo.names.iteritems():
220 for nsname, ns in self.repo.names.iteritems():
218 # branches has special logic already handled above, so here we just
221 # branches has special logic already handled above, so here we just
219 # skip it
222 # skip it
220 if nsname == 'branches':
223 if nsname == 'branches':
221 continue
224 continue
222 # we will use the templatename as the color name since those two
225 # we will use the templatename as the color name since those two
223 # should be the same
226 # should be the same
224 for name in ns.names(self.repo, changenode):
227 for name in ns.names(self.repo, changenode):
225 self.ui.write(ns.logfmt % name,
228 self.ui.write(ns.logfmt % name,
226 label='log.%s' % ns.colorname)
229 label='log.%s' % ns.colorname)
227 if self.ui.debugflag:
230 if self.ui.debugflag:
228 self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
231 self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
229 for pctx in scmutil.meaningfulparents(self.repo, ctx):
232 for pctx in scmutil.meaningfulparents(self.repo, ctx):
230 label = 'log.parent changeset.%s' % pctx.phasestr()
233 label = 'log.parent changeset.%s' % pctx.phasestr()
231 self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
234 self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
232 label=label)
235 label=label)
233
236
234 if self.ui.debugflag:
237 if self.ui.debugflag:
235 mnode = ctx.manifestnode()
238 mnode = ctx.manifestnode()
236 if mnode is None:
239 if mnode is None:
237 mnode = wdirid
240 mnode = wdirid
238 mrev = wdirrev
241 mrev = wdirrev
239 else:
242 else:
240 mrev = self.repo.manifestlog.rev(mnode)
243 mrev = self.repo.manifestlog.rev(mnode)
241 self.ui.write(columns['manifest']
244 self.ui.write(columns['manifest']
242 % scmutil.formatrevnode(self.ui, mrev, mnode),
245 % scmutil.formatrevnode(self.ui, mrev, mnode),
243 label='ui.debug log.manifest')
246 label='ui.debug log.manifest')
244 self.ui.write(columns['user'] % ctx.user(), label='log.user')
247 self.ui.write(columns['user'] % ctx.user(), label='log.user')
245 self.ui.write(columns['date'] % dateutil.datestr(ctx.date()),
248 self.ui.write(columns['date'] % dateutil.datestr(ctx.date()),
246 label='log.date')
249 label='log.date')
247
250
248 if ctx.isunstable():
251 if ctx.isunstable():
249 instabilities = ctx.instabilities()
252 instabilities = ctx.instabilities()
250 self.ui.write(columns['instability'] % ', '.join(instabilities),
253 self.ui.write(columns['instability'] % ', '.join(instabilities),
251 label='log.instability')
254 label='log.instability')
252
255
253 elif ctx.obsolete():
256 elif ctx.obsolete():
254 self._showobsfate(ctx)
257 self._showobsfate(ctx)
255
258
256 self._exthook(ctx)
259 self._exthook(ctx)
257
260
258 if self.ui.debugflag:
261 if self.ui.debugflag:
259 files = ctx.p1().status(ctx)[:3]
262 files = ctx.p1().status(ctx)[:3]
260 for key, value in zip(['files', 'files+', 'files-'], files):
263 for key, value in zip(['files', 'files+', 'files-'], files):
261 if value:
264 if value:
262 self.ui.write(columns[key] % " ".join(value),
265 self.ui.write(columns[key] % " ".join(value),
263 label='ui.debug log.files')
266 label='ui.debug log.files')
264 elif ctx.files() and self.ui.verbose:
267 elif ctx.files() and self.ui.verbose:
265 self.ui.write(columns['files'] % " ".join(ctx.files()),
268 self.ui.write(columns['files'] % " ".join(ctx.files()),
266 label='ui.note log.files')
269 label='ui.note log.files')
267 if copies and self.ui.verbose:
270 if copies and self.ui.verbose:
268 copies = ['%s (%s)' % c for c in copies]
271 copies = ['%s (%s)' % c for c in copies]
269 self.ui.write(columns['copies'] % ' '.join(copies),
272 self.ui.write(columns['copies'] % ' '.join(copies),
270 label='ui.note log.copies')
273 label='ui.note log.copies')
271
274
272 extra = ctx.extra()
275 extra = ctx.extra()
273 if extra and self.ui.debugflag:
276 if extra and self.ui.debugflag:
274 for key, value in sorted(extra.items()):
277 for key, value in sorted(extra.items()):
275 self.ui.write(columns['extra']
278 self.ui.write(columns['extra']
276 % (key, stringutil.escapestr(value)),
279 % (key, stringutil.escapestr(value)),
277 label='ui.debug log.extra')
280 label='ui.debug log.extra')
278
281
279 description = ctx.description().strip()
282 description = ctx.description().strip()
280 if description:
283 if description:
281 if self.ui.verbose:
284 if self.ui.verbose:
282 self.ui.write(_("description:\n"),
285 self.ui.write(_("description:\n"),
283 label='ui.note log.description')
286 label='ui.note log.description')
284 self.ui.write(description,
287 self.ui.write(description,
285 label='ui.note log.description')
288 label='ui.note log.description')
286 self.ui.write("\n\n")
289 self.ui.write("\n\n")
287 else:
290 else:
288 self.ui.write(columns['summary'] % description.splitlines()[0],
291 self.ui.write(columns['summary'] % description.splitlines()[0],
289 label='log.summary')
292 label='log.summary')
290 self.ui.write("\n")
293 self.ui.write("\n")
291
294
292 self._showpatch(ctx, graphwidth)
295 self._showpatch(ctx, graphwidth)
293
296
294 def _showobsfate(self, ctx):
297 def _showobsfate(self, ctx):
295 # TODO: do not depend on templater
298 # TODO: do not depend on templater
296 tres = formatter.templateresources(self.repo.ui, self.repo)
299 tres = formatter.templateresources(self.repo.ui, self.repo)
297 t = formatter.maketemplater(self.repo.ui, '{join(obsfate, "\n")}',
300 t = formatter.maketemplater(self.repo.ui, '{join(obsfate, "\n")}',
298 defaults=templatekw.keywords,
301 defaults=templatekw.keywords,
299 resources=tres)
302 resources=tres)
300 obsfate = t.renderdefault({'ctx': ctx}).splitlines()
303 obsfate = t.renderdefault({'ctx': ctx}).splitlines()
301
304
302 if obsfate:
305 if obsfate:
303 for obsfateline in obsfate:
306 for obsfateline in obsfate:
304 self.ui.write(self._columns['obsolete'] % obsfateline,
307 self.ui.write(self._columns['obsolete'] % obsfateline,
305 label='log.obsfate')
308 label='log.obsfate')
306
309
307 def _exthook(self, ctx):
310 def _exthook(self, ctx):
308 '''empty method used by extension as a hook point
311 '''empty method used by extension as a hook point
309 '''
312 '''
310
313
311 def _showpatch(self, ctx, graphwidth=0):
314 def _showpatch(self, ctx, graphwidth=0):
312 if self._includestat:
315 if self._includestat:
313 self._differ.showdiff(self.ui, ctx, self._diffopts,
316 self._differ.showdiff(self.ui, ctx, self._diffopts,
314 graphwidth, stat=True)
317 graphwidth, stat=True)
315 if self._includestat and self._includediff:
318 if self._includestat and self._includediff:
316 self.ui.write("\n")
319 self.ui.write("\n")
317 if self._includediff:
320 if self._includediff:
318 self._differ.showdiff(self.ui, ctx, self._diffopts,
321 self._differ.showdiff(self.ui, ctx, self._diffopts,
319 graphwidth, stat=False)
322 graphwidth, stat=False)
320 if self._includestat or self._includediff:
323 if self._includestat or self._includediff:
321 self.ui.write("\n")
324 self.ui.write("\n")
322
325
323 class changesetformatter(changesetprinter):
326 class changesetformatter(changesetprinter):
324 """Format changeset information by generic formatter"""
327 """Format changeset information by generic formatter"""
325
328
326 def __init__(self, ui, repo, fm, differ=None, diffopts=None,
329 def __init__(self, ui, repo, fm, differ=None, diffopts=None,
327 buffered=False):
330 buffered=False):
328 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
331 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
329 self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
332 self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
330 self._fm = fm
333 self._fm = fm
331
334
332 def close(self):
335 def close(self):
333 self._fm.end()
336 self._fm.end()
334
337
335 def _show(self, ctx, copies, props):
338 def _show(self, ctx, copies, props):
336 '''show a single changeset or file revision'''
339 '''show a single changeset or file revision'''
337 fm = self._fm
340 fm = self._fm
338 fm.startitem()
341 fm.startitem()
339 fm.context(ctx=ctx)
342 fm.context(ctx=ctx)
340 fm.data(rev=scmutil.intrev(ctx),
343 fm.data(rev=scmutil.intrev(ctx),
341 node=fm.hexfunc(scmutil.binnode(ctx)))
344 node=fm.hexfunc(scmutil.binnode(ctx)))
342
345
343 if self.ui.quiet:
346 if self.ui.quiet:
344 return
347 return
345
348
346 fm.data(branch=ctx.branch(),
349 fm.data(branch=ctx.branch(),
347 phase=ctx.phasestr(),
350 phase=ctx.phasestr(),
348 user=ctx.user(),
351 user=ctx.user(),
349 date=fm.formatdate(ctx.date()),
352 date=fm.formatdate(ctx.date()),
350 desc=ctx.description(),
353 desc=ctx.description(),
351 bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark'),
354 bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark'),
352 tags=fm.formatlist(ctx.tags(), name='tag'),
355 tags=fm.formatlist(ctx.tags(), name='tag'),
353 parents=fm.formatlist([fm.hexfunc(c.node())
356 parents=fm.formatlist([fm.hexfunc(c.node())
354 for c in ctx.parents()], name='node'))
357 for c in ctx.parents()], name='node'))
355
358
356 if self.ui.debugflag:
359 if self.ui.debugflag:
357 fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid),
360 fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid),
358 extra=fm.formatdict(ctx.extra()))
361 extra=fm.formatdict(ctx.extra()))
359
362
360 files = ctx.p1().status(ctx)
363 files = ctx.p1().status(ctx)
361 fm.data(modified=fm.formatlist(files[0], name='file'),
364 fm.data(modified=fm.formatlist(files[0], name='file'),
362 added=fm.formatlist(files[1], name='file'),
365 added=fm.formatlist(files[1], name='file'),
363 removed=fm.formatlist(files[2], name='file'))
366 removed=fm.formatlist(files[2], name='file'))
364
367
365 elif self.ui.verbose:
368 elif self.ui.verbose:
366 fm.data(files=fm.formatlist(ctx.files(), name='file'))
369 fm.data(files=fm.formatlist(ctx.files(), name='file'))
367 if copies:
370 if copies:
368 fm.data(copies=fm.formatdict(copies,
371 fm.data(copies=fm.formatdict(copies,
369 key='name', value='source'))
372 key='name', value='source'))
370
373
371 if self._includestat:
374 if self._includestat:
372 self.ui.pushbuffer()
375 self.ui.pushbuffer()
373 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
376 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
374 fm.data(diffstat=self.ui.popbuffer())
377 fm.data(diffstat=self.ui.popbuffer())
375 if self._includediff:
378 if self._includediff:
376 self.ui.pushbuffer()
379 self.ui.pushbuffer()
377 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
380 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
378 fm.data(diff=self.ui.popbuffer())
381 fm.data(diff=self.ui.popbuffer())
379
382
380 class changesettemplater(changesetprinter):
383 class changesettemplater(changesetprinter):
381 '''format changeset information.
384 '''format changeset information.
382
385
383 Note: there are a variety of convenience functions to build a
386 Note: there are a variety of convenience functions to build a
384 changesettemplater for common cases. See functions such as:
387 changesettemplater for common cases. See functions such as:
385 maketemplater, changesetdisplayer, buildcommittemplate, or other
388 maketemplater, changesetdisplayer, buildcommittemplate, or other
386 functions that use changesest_templater.
389 functions that use changesest_templater.
387 '''
390 '''
388
391
389 # Arguments before "buffered" used to be positional. Consider not
392 # Arguments before "buffered" used to be positional. Consider not
390 # adding/removing arguments before "buffered" to not break callers.
393 # adding/removing arguments before "buffered" to not break callers.
391 def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None,
394 def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None,
392 buffered=False):
395 buffered=False):
393 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
396 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
394 # tres is shared with _graphnodeformatter()
397 # tres is shared with _graphnodeformatter()
395 self._tresources = tres = formatter.templateresources(ui, repo)
398 self._tresources = tres = formatter.templateresources(ui, repo)
396 self.t = formatter.loadtemplater(ui, tmplspec,
399 self.t = formatter.loadtemplater(ui, tmplspec,
397 defaults=templatekw.keywords,
400 defaults=templatekw.keywords,
398 resources=tres,
401 resources=tres,
399 cache=templatekw.defaulttempl)
402 cache=templatekw.defaulttempl)
400 self._counter = itertools.count()
403 self._counter = itertools.count()
401
404
402 self._tref = tmplspec.ref
405 self._tref = tmplspec.ref
403 self._parts = {'header': '', 'footer': '',
406 self._parts = {'header': '', 'footer': '',
404 tmplspec.ref: tmplspec.ref,
407 tmplspec.ref: tmplspec.ref,
405 'docheader': '', 'docfooter': '',
408 'docheader': '', 'docfooter': '',
406 'separator': ''}
409 'separator': ''}
407 if tmplspec.mapfile:
410 if tmplspec.mapfile:
408 # find correct templates for current mode, for backward
411 # find correct templates for current mode, for backward
409 # compatibility with 'log -v/-q/--debug' using a mapfile
412 # compatibility with 'log -v/-q/--debug' using a mapfile
410 tmplmodes = [
413 tmplmodes = [
411 (True, ''),
414 (True, ''),
412 (self.ui.verbose, '_verbose'),
415 (self.ui.verbose, '_verbose'),
413 (self.ui.quiet, '_quiet'),
416 (self.ui.quiet, '_quiet'),
414 (self.ui.debugflag, '_debug'),
417 (self.ui.debugflag, '_debug'),
415 ]
418 ]
416 for mode, postfix in tmplmodes:
419 for mode, postfix in tmplmodes:
417 for t in self._parts:
420 for t in self._parts:
418 cur = t + postfix
421 cur = t + postfix
419 if mode and cur in self.t:
422 if mode and cur in self.t:
420 self._parts[t] = cur
423 self._parts[t] = cur
421 else:
424 else:
422 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
425 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
423 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
426 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
424 self._parts.update(m)
427 self._parts.update(m)
425
428
426 if self._parts['docheader']:
429 if self._parts['docheader']:
427 self.ui.write(self.t.render(self._parts['docheader'], {}))
430 self.ui.write(self.t.render(self._parts['docheader'], {}))
428
431
429 def close(self):
432 def close(self):
430 if self._parts['docfooter']:
433 if self._parts['docfooter']:
431 if not self.footer:
434 if not self.footer:
432 self.footer = ""
435 self.footer = ""
433 self.footer += self.t.render(self._parts['docfooter'], {})
436 self.footer += self.t.render(self._parts['docfooter'], {})
434 return super(changesettemplater, self).close()
437 return super(changesettemplater, self).close()
435
438
436 def _show(self, ctx, copies, props):
439 def _show(self, ctx, copies, props):
437 '''show a single changeset or file revision'''
440 '''show a single changeset or file revision'''
438 props = props.copy()
441 props = props.copy()
439 props['ctx'] = ctx
442 props['ctx'] = ctx
440 props['index'] = index = next(self._counter)
443 props['index'] = index = next(self._counter)
441 props['revcache'] = {'copies': copies}
444 props['revcache'] = {'copies': copies}
442 graphwidth = props.get('graphwidth', 0)
445 graphwidth = props.get('graphwidth', 0)
443
446
444 # write separator, which wouldn't work well with the header part below
447 # write separator, which wouldn't work well with the header part below
445 # since there's inherently a conflict between header (across items) and
448 # since there's inherently a conflict between header (across items) and
446 # separator (per item)
449 # separator (per item)
447 if self._parts['separator'] and index > 0:
450 if self._parts['separator'] and index > 0:
448 self.ui.write(self.t.render(self._parts['separator'], {}))
451 self.ui.write(self.t.render(self._parts['separator'], {}))
449
452
450 # write header
453 # write header
451 if self._parts['header']:
454 if self._parts['header']:
452 h = self.t.render(self._parts['header'], props)
455 h = self.t.render(self._parts['header'], props)
453 if self.buffered:
456 if self.buffered:
454 self.header[ctx.rev()] = h
457 self.header[ctx.rev()] = h
455 else:
458 else:
456 if self.lastheader != h:
459 if self.lastheader != h:
457 self.lastheader = h
460 self.lastheader = h
458 self.ui.write(h)
461 self.ui.write(h)
459
462
460 # write changeset metadata, then patch if requested
463 # write changeset metadata, then patch if requested
461 key = self._parts[self._tref]
464 key = self._parts[self._tref]
462 self.ui.write(self.t.render(key, props))
465 self.ui.write(self.t.render(key, props))
463 self._showpatch(ctx, graphwidth)
466 self._showpatch(ctx, graphwidth)
464
467
465 if self._parts['footer']:
468 if self._parts['footer']:
466 if not self.footer:
469 if not self.footer:
467 self.footer = self.t.render(self._parts['footer'], props)
470 self.footer = self.t.render(self._parts['footer'], props)
468
471
469 def templatespec(tmpl, mapfile):
472 def templatespec(tmpl, mapfile):
470 if pycompat.ispy3:
473 if pycompat.ispy3:
471 assert not isinstance(tmpl, str), 'tmpl must not be a str'
474 assert not isinstance(tmpl, str), 'tmpl must not be a str'
472 if mapfile:
475 if mapfile:
473 return formatter.templatespec('changeset', tmpl, mapfile)
476 return formatter.templatespec('changeset', tmpl, mapfile)
474 else:
477 else:
475 return formatter.templatespec('', tmpl, None)
478 return formatter.templatespec('', tmpl, None)
476
479
477 def _lookuptemplate(ui, tmpl, style):
480 def _lookuptemplate(ui, tmpl, style):
478 """Find the template matching the given template spec or style
481 """Find the template matching the given template spec or style
479
482
480 See formatter.lookuptemplate() for details.
483 See formatter.lookuptemplate() for details.
481 """
484 """
482
485
483 # ui settings
486 # ui settings
484 if not tmpl and not style: # template are stronger than style
487 if not tmpl and not style: # template are stronger than style
485 tmpl = ui.config('ui', 'logtemplate')
488 tmpl = ui.config('ui', 'logtemplate')
486 if tmpl:
489 if tmpl:
487 return templatespec(templater.unquotestring(tmpl), None)
490 return templatespec(templater.unquotestring(tmpl), None)
488 else:
491 else:
489 style = util.expandpath(ui.config('ui', 'style'))
492 style = util.expandpath(ui.config('ui', 'style'))
490
493
491 if not tmpl and style:
494 if not tmpl and style:
492 mapfile = style
495 mapfile = style
493 if not os.path.split(mapfile)[0]:
496 if not os.path.split(mapfile)[0]:
494 mapname = (templater.templatepath('map-cmdline.' + mapfile)
497 mapname = (templater.templatepath('map-cmdline.' + mapfile)
495 or templater.templatepath(mapfile))
498 or templater.templatepath(mapfile))
496 if mapname:
499 if mapname:
497 mapfile = mapname
500 mapfile = mapname
498 return templatespec(None, mapfile)
501 return templatespec(None, mapfile)
499
502
500 if not tmpl:
503 if not tmpl:
501 return templatespec(None, None)
504 return templatespec(None, None)
502
505
503 return formatter.lookuptemplate(ui, 'changeset', tmpl)
506 return formatter.lookuptemplate(ui, 'changeset', tmpl)
504
507
505 def maketemplater(ui, repo, tmpl, buffered=False):
508 def maketemplater(ui, repo, tmpl, buffered=False):
506 """Create a changesettemplater from a literal template 'tmpl'
509 """Create a changesettemplater from a literal template 'tmpl'
507 byte-string."""
510 byte-string."""
508 spec = templatespec(tmpl, None)
511 spec = templatespec(tmpl, None)
509 return changesettemplater(ui, repo, spec, buffered=buffered)
512 return changesettemplater(ui, repo, spec, buffered=buffered)
510
513
511 def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
514 def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
512 """show one changeset using template or regular display.
515 """show one changeset using template or regular display.
513
516
514 Display format will be the first non-empty hit of:
517 Display format will be the first non-empty hit of:
515 1. option 'template'
518 1. option 'template'
516 2. option 'style'
519 2. option 'style'
517 3. [ui] setting 'logtemplate'
520 3. [ui] setting 'logtemplate'
518 4. [ui] setting 'style'
521 4. [ui] setting 'style'
519 If all of these values are either the unset or the empty string,
522 If all of these values are either the unset or the empty string,
520 regular display via changesetprinter() is done.
523 regular display via changesetprinter() is done.
521 """
524 """
522 postargs = (differ, opts, buffered)
525 postargs = (differ, opts, buffered)
523 if opts.get('template') == 'json':
526 if opts.get('template') == 'json':
524 fm = ui.formatter('log', opts)
527 fm = ui.formatter('log', opts)
525 return changesetformatter(ui, repo, fm, *postargs)
528 return changesetformatter(ui, repo, fm, *postargs)
526
529
527 spec = _lookuptemplate(ui, opts.get('template'), opts.get('style'))
530 spec = _lookuptemplate(ui, opts.get('template'), opts.get('style'))
528
531
529 if not spec.ref and not spec.tmpl and not spec.mapfile:
532 if not spec.ref and not spec.tmpl and not spec.mapfile:
530 return changesetprinter(ui, repo, *postargs)
533 return changesetprinter(ui, repo, *postargs)
531
534
532 return changesettemplater(ui, repo, spec, *postargs)
535 return changesettemplater(ui, repo, spec, *postargs)
533
536
534 def _makematcher(repo, revs, pats, opts):
537 def _makematcher(repo, revs, pats, opts):
535 """Build matcher and expanded patterns from log options
538 """Build matcher and expanded patterns from log options
536
539
537 If --follow, revs are the revisions to follow from.
540 If --follow, revs are the revisions to follow from.
538
541
539 Returns (match, pats, slowpath) where
542 Returns (match, pats, slowpath) where
540 - match: a matcher built from the given pats and -I/-X opts
543 - match: a matcher built from the given pats and -I/-X opts
541 - pats: patterns used (globs are expanded on Windows)
544 - pats: patterns used (globs are expanded on Windows)
542 - slowpath: True if patterns aren't as simple as scanning filelogs
545 - slowpath: True if patterns aren't as simple as scanning filelogs
543 """
546 """
544 # pats/include/exclude are passed to match.match() directly in
547 # pats/include/exclude are passed to match.match() directly in
545 # _matchfiles() revset but walkchangerevs() builds its matcher with
548 # _matchfiles() revset but walkchangerevs() builds its matcher with
546 # scmutil.match(). The difference is input pats are globbed on
549 # scmutil.match(). The difference is input pats are globbed on
547 # platforms without shell expansion (windows).
550 # platforms without shell expansion (windows).
548 wctx = repo[None]
551 wctx = repo[None]
549 match, pats = scmutil.matchandpats(wctx, pats, opts)
552 match, pats = scmutil.matchandpats(wctx, pats, opts)
550 slowpath = match.anypats() or (not match.always() and opts.get('removed'))
553 slowpath = match.anypats() or (not match.always() and opts.get('removed'))
551 if not slowpath:
554 if not slowpath:
552 follow = opts.get('follow') or opts.get('follow_first')
555 follow = opts.get('follow') or opts.get('follow_first')
553 startctxs = []
556 startctxs = []
554 if follow and opts.get('rev'):
557 if follow and opts.get('rev'):
555 startctxs = [repo[r] for r in revs]
558 startctxs = [repo[r] for r in revs]
556 for f in match.files():
559 for f in match.files():
557 if follow and startctxs:
560 if follow and startctxs:
558 # No idea if the path was a directory at that revision, so
561 # No idea if the path was a directory at that revision, so
559 # take the slow path.
562 # take the slow path.
560 if any(f not in c for c in startctxs):
563 if any(f not in c for c in startctxs):
561 slowpath = True
564 slowpath = True
562 continue
565 continue
563 elif follow and f not in wctx:
566 elif follow and f not in wctx:
564 # If the file exists, it may be a directory, so let it
567 # If the file exists, it may be a directory, so let it
565 # take the slow path.
568 # take the slow path.
566 if os.path.exists(repo.wjoin(f)):
569 if os.path.exists(repo.wjoin(f)):
567 slowpath = True
570 slowpath = True
568 continue
571 continue
569 else:
572 else:
570 raise error.Abort(_('cannot follow file not in parent '
573 raise error.Abort(_('cannot follow file not in parent '
571 'revision: "%s"') % f)
574 'revision: "%s"') % f)
572 filelog = repo.file(f)
575 filelog = repo.file(f)
573 if not filelog:
576 if not filelog:
574 # A zero count may be a directory or deleted file, so
577 # A zero count may be a directory or deleted file, so
575 # try to find matching entries on the slow path.
578 # try to find matching entries on the slow path.
576 if follow:
579 if follow:
577 raise error.Abort(
580 raise error.Abort(
578 _('cannot follow nonexistent file: "%s"') % f)
581 _('cannot follow nonexistent file: "%s"') % f)
579 slowpath = True
582 slowpath = True
580
583
581 # We decided to fall back to the slowpath because at least one
584 # We decided to fall back to the slowpath because at least one
582 # of the paths was not a file. Check to see if at least one of them
585 # of the paths was not a file. Check to see if at least one of them
583 # existed in history - in that case, we'll continue down the
586 # existed in history - in that case, we'll continue down the
584 # slowpath; otherwise, we can turn off the slowpath
587 # slowpath; otherwise, we can turn off the slowpath
585 if slowpath:
588 if slowpath:
586 for path in match.files():
589 for path in match.files():
587 if path == '.' or path in repo.store:
590 if path == '.' or path in repo.store:
588 break
591 break
589 else:
592 else:
590 slowpath = False
593 slowpath = False
591
594
592 return match, pats, slowpath
595 return match, pats, slowpath
593
596
594 def _fileancestors(repo, revs, match, followfirst):
597 def _fileancestors(repo, revs, match, followfirst):
595 fctxs = []
598 fctxs = []
596 for r in revs:
599 for r in revs:
597 ctx = repo[r]
600 ctx = repo[r]
598 fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
601 fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
599
602
600 # When displaying a revision with --patch --follow FILE, we have
603 # When displaying a revision with --patch --follow FILE, we have
601 # to know which file of the revision must be diffed. With
604 # to know which file of the revision must be diffed. With
602 # --follow, we want the names of the ancestors of FILE in the
605 # --follow, we want the names of the ancestors of FILE in the
603 # revision, stored in "fcache". "fcache" is populated as a side effect
606 # revision, stored in "fcache". "fcache" is populated as a side effect
604 # of the graph traversal.
607 # of the graph traversal.
605 fcache = {}
608 fcache = {}
606 def filematcher(ctx):
609 def filematcher(ctx):
607 return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))
610 return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))
608
611
609 def revgen():
612 def revgen():
610 for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
613 for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
611 fcache[rev] = [c.path() for c in cs]
614 fcache[rev] = [c.path() for c in cs]
612 yield rev
615 yield rev
613 return smartset.generatorset(revgen(), iterasc=False), filematcher
616 return smartset.generatorset(revgen(), iterasc=False), filematcher
614
617
615 def _makenofollowfilematcher(repo, pats, opts):
618 def _makenofollowfilematcher(repo, pats, opts):
616 '''hook for extensions to override the filematcher for non-follow cases'''
619 '''hook for extensions to override the filematcher for non-follow cases'''
617 return None
620 return None
618
621
619 _opt2logrevset = {
622 _opt2logrevset = {
620 'no_merges': ('not merge()', None),
623 'no_merges': ('not merge()', None),
621 'only_merges': ('merge()', None),
624 'only_merges': ('merge()', None),
622 '_matchfiles': (None, '_matchfiles(%ps)'),
625 '_matchfiles': (None, '_matchfiles(%ps)'),
623 'date': ('date(%s)', None),
626 'date': ('date(%s)', None),
624 'branch': ('branch(%s)', '%lr'),
627 'branch': ('branch(%s)', '%lr'),
625 '_patslog': ('filelog(%s)', '%lr'),
628 '_patslog': ('filelog(%s)', '%lr'),
626 'keyword': ('keyword(%s)', '%lr'),
629 'keyword': ('keyword(%s)', '%lr'),
627 'prune': ('ancestors(%s)', 'not %lr'),
630 'prune': ('ancestors(%s)', 'not %lr'),
628 'user': ('user(%s)', '%lr'),
631 'user': ('user(%s)', '%lr'),
629 }
632 }
630
633
631 def _makerevset(repo, match, pats, slowpath, opts):
634 def _makerevset(repo, match, pats, slowpath, opts):
632 """Return a revset string built from log options and file patterns"""
635 """Return a revset string built from log options and file patterns"""
633 opts = dict(opts)
636 opts = dict(opts)
634 # follow or not follow?
637 # follow or not follow?
635 follow = opts.get('follow') or opts.get('follow_first')
638 follow = opts.get('follow') or opts.get('follow_first')
636
639
637 # branch and only_branch are really aliases and must be handled at
640 # branch and only_branch are really aliases and must be handled at
638 # the same time
641 # the same time
639 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
642 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
640 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
643 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
641
644
642 if slowpath:
645 if slowpath:
643 # See walkchangerevs() slow path.
646 # See walkchangerevs() slow path.
644 #
647 #
645 # pats/include/exclude cannot be represented as separate
648 # pats/include/exclude cannot be represented as separate
646 # revset expressions as their filtering logic applies at file
649 # revset expressions as their filtering logic applies at file
647 # level. For instance "-I a -X b" matches a revision touching
650 # level. For instance "-I a -X b" matches a revision touching
648 # "a" and "b" while "file(a) and not file(b)" does
651 # "a" and "b" while "file(a) and not file(b)" does
649 # not. Besides, filesets are evaluated against the working
652 # not. Besides, filesets are evaluated against the working
650 # directory.
653 # directory.
651 matchargs = ['r:', 'd:relpath']
654 matchargs = ['r:', 'd:relpath']
652 for p in pats:
655 for p in pats:
653 matchargs.append('p:' + p)
656 matchargs.append('p:' + p)
654 for p in opts.get('include', []):
657 for p in opts.get('include', []):
655 matchargs.append('i:' + p)
658 matchargs.append('i:' + p)
656 for p in opts.get('exclude', []):
659 for p in opts.get('exclude', []):
657 matchargs.append('x:' + p)
660 matchargs.append('x:' + p)
658 opts['_matchfiles'] = matchargs
661 opts['_matchfiles'] = matchargs
659 elif not follow:
662 elif not follow:
660 opts['_patslog'] = list(pats)
663 opts['_patslog'] = list(pats)
661
664
662 expr = []
665 expr = []
663 for op, val in sorted(opts.iteritems()):
666 for op, val in sorted(opts.iteritems()):
664 if not val:
667 if not val:
665 continue
668 continue
666 if op not in _opt2logrevset:
669 if op not in _opt2logrevset:
667 continue
670 continue
668 revop, listop = _opt2logrevset[op]
671 revop, listop = _opt2logrevset[op]
669 if revop and '%' not in revop:
672 if revop and '%' not in revop:
670 expr.append(revop)
673 expr.append(revop)
671 elif not listop:
674 elif not listop:
672 expr.append(revsetlang.formatspec(revop, val))
675 expr.append(revsetlang.formatspec(revop, val))
673 else:
676 else:
674 if revop:
677 if revop:
675 val = [revsetlang.formatspec(revop, v) for v in val]
678 val = [revsetlang.formatspec(revop, v) for v in val]
676 expr.append(revsetlang.formatspec(listop, val))
679 expr.append(revsetlang.formatspec(listop, val))
677
680
678 if expr:
681 if expr:
679 expr = '(' + ' and '.join(expr) + ')'
682 expr = '(' + ' and '.join(expr) + ')'
680 else:
683 else:
681 expr = None
684 expr = None
682 return expr
685 return expr
683
686
684 def _initialrevs(repo, opts):
687 def _initialrevs(repo, opts):
685 """Return the initial set of revisions to be filtered or followed"""
688 """Return the initial set of revisions to be filtered or followed"""
686 follow = opts.get('follow') or opts.get('follow_first')
689 follow = opts.get('follow') or opts.get('follow_first')
687 if opts.get('rev'):
690 if opts.get('rev'):
688 revs = scmutil.revrange(repo, opts['rev'])
691 revs = scmutil.revrange(repo, opts['rev'])
689 elif follow and repo.dirstate.p1() == nullid:
692 elif follow and repo.dirstate.p1() == nullid:
690 revs = smartset.baseset()
693 revs = smartset.baseset()
691 elif follow:
694 elif follow:
692 revs = repo.revs('.')
695 revs = repo.revs('.')
693 else:
696 else:
694 revs = smartset.spanset(repo)
697 revs = smartset.spanset(repo)
695 revs.reverse()
698 revs.reverse()
696 return revs
699 return revs
697
700
698 def getrevs(repo, pats, opts):
701 def getrevs(repo, pats, opts):
699 """Return (revs, differ) where revs is a smartset
702 """Return (revs, differ) where revs is a smartset
700
703
701 differ is a changesetdiffer with pre-configured file matcher.
704 differ is a changesetdiffer with pre-configured file matcher.
702 """
705 """
703 follow = opts.get('follow') or opts.get('follow_first')
706 follow = opts.get('follow') or opts.get('follow_first')
704 followfirst = opts.get('follow_first')
707 followfirst = opts.get('follow_first')
705 limit = getlimit(opts)
708 limit = getlimit(opts)
706 revs = _initialrevs(repo, opts)
709 revs = _initialrevs(repo, opts)
707 if not revs:
710 if not revs:
708 return smartset.baseset(), None
711 return smartset.baseset(), None
709 match, pats, slowpath = _makematcher(repo, revs, pats, opts)
712 match, pats, slowpath = _makematcher(repo, revs, pats, opts)
710 filematcher = None
713 filematcher = None
711 if follow:
714 if follow:
712 if slowpath or match.always():
715 if slowpath or match.always():
713 revs = dagop.revancestors(repo, revs, followfirst=followfirst)
716 revs = dagop.revancestors(repo, revs, followfirst=followfirst)
714 else:
717 else:
715 revs, filematcher = _fileancestors(repo, revs, match, followfirst)
718 revs, filematcher = _fileancestors(repo, revs, match, followfirst)
716 revs.reverse()
719 revs.reverse()
717 if filematcher is None:
720 if filematcher is None:
718 filematcher = _makenofollowfilematcher(repo, pats, opts)
721 filematcher = _makenofollowfilematcher(repo, pats, opts)
719 if filematcher is None:
722 if filematcher is None:
720 def filematcher(ctx):
723 def filematcher(ctx):
721 return match
724 return match
722
725
723 expr = _makerevset(repo, match, pats, slowpath, opts)
726 expr = _makerevset(repo, match, pats, slowpath, opts)
724 if opts.get('graph') and opts.get('rev'):
727 if opts.get('graph') and opts.get('rev'):
725 # User-specified revs might be unsorted, but don't sort before
728 # User-specified revs might be unsorted, but don't sort before
726 # _makerevset because it might depend on the order of revs
729 # _makerevset because it might depend on the order of revs
727 if not (revs.isdescending() or revs.istopo()):
730 if not (revs.isdescending() or revs.istopo()):
728 revs.sort(reverse=True)
731 revs.sort(reverse=True)
729 if expr:
732 if expr:
730 matcher = revset.match(None, expr)
733 matcher = revset.match(None, expr)
731 revs = matcher(repo, revs)
734 revs = matcher(repo, revs)
732 if limit is not None:
735 if limit is not None:
733 revs = revs.slice(0, limit)
736 revs = revs.slice(0, limit)
734
737
735 differ = changesetdiffer()
738 differ = changesetdiffer()
736 differ._makefilematcher = filematcher
739 differ._makefilematcher = filematcher
737 return revs, differ
740 return revs, differ
738
741
739 def _parselinerangeopt(repo, opts):
742 def _parselinerangeopt(repo, opts):
740 """Parse --line-range log option and return a list of tuples (filename,
743 """Parse --line-range log option and return a list of tuples (filename,
741 (fromline, toline)).
744 (fromline, toline)).
742 """
745 """
743 linerangebyfname = []
746 linerangebyfname = []
744 for pat in opts.get('line_range', []):
747 for pat in opts.get('line_range', []):
745 try:
748 try:
746 pat, linerange = pat.rsplit(',', 1)
749 pat, linerange = pat.rsplit(',', 1)
747 except ValueError:
750 except ValueError:
748 raise error.Abort(_('malformatted line-range pattern %s') % pat)
751 raise error.Abort(_('malformatted line-range pattern %s') % pat)
749 try:
752 try:
750 fromline, toline = map(int, linerange.split(':'))
753 fromline, toline = map(int, linerange.split(':'))
751 except ValueError:
754 except ValueError:
752 raise error.Abort(_("invalid line range for %s") % pat)
755 raise error.Abort(_("invalid line range for %s") % pat)
753 msg = _("line range pattern '%s' must match exactly one file") % pat
756 msg = _("line range pattern '%s' must match exactly one file") % pat
754 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
757 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
755 linerangebyfname.append(
758 linerangebyfname.append(
756 (fname, util.processlinerange(fromline, toline)))
759 (fname, util.processlinerange(fromline, toline)))
757 return linerangebyfname
760 return linerangebyfname
758
761
759 def getlinerangerevs(repo, userrevs, opts):
762 def getlinerangerevs(repo, userrevs, opts):
760 """Return (revs, differ).
763 """Return (revs, differ).
761
764
762 "revs" are revisions obtained by processing "line-range" log options and
765 "revs" are revisions obtained by processing "line-range" log options and
763 walking block ancestors of each specified file/line-range.
766 walking block ancestors of each specified file/line-range.
764
767
765 "differ" is a changesetdiffer with pre-configured file matcher and hunks
768 "differ" is a changesetdiffer with pre-configured file matcher and hunks
766 filter.
769 filter.
767 """
770 """
768 wctx = repo[None]
771 wctx = repo[None]
769
772
770 # Two-levels map of "rev -> file ctx -> [line range]".
773 # Two-levels map of "rev -> file ctx -> [line range]".
771 linerangesbyrev = {}
774 linerangesbyrev = {}
772 for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
775 for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
773 if fname not in wctx:
776 if fname not in wctx:
774 raise error.Abort(_('cannot follow file not in parent '
777 raise error.Abort(_('cannot follow file not in parent '
775 'revision: "%s"') % fname)
778 'revision: "%s"') % fname)
776 fctx = wctx.filectx(fname)
779 fctx = wctx.filectx(fname)
777 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
780 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
778 rev = fctx.introrev()
781 rev = fctx.introrev()
779 if rev not in userrevs:
782 if rev not in userrevs:
780 continue
783 continue
781 linerangesbyrev.setdefault(
784 linerangesbyrev.setdefault(
782 rev, {}).setdefault(
785 rev, {}).setdefault(
783 fctx.path(), []).append(linerange)
786 fctx.path(), []).append(linerange)
784
787
785 def nofilterhunksfn(fctx, hunks):
788 def nofilterhunksfn(fctx, hunks):
786 return hunks
789 return hunks
787
790
788 def hunksfilter(ctx):
791 def hunksfilter(ctx):
789 fctxlineranges = linerangesbyrev.get(ctx.rev())
792 fctxlineranges = linerangesbyrev.get(ctx.rev())
790 if fctxlineranges is None:
793 if fctxlineranges is None:
791 return nofilterhunksfn
794 return nofilterhunksfn
792
795
793 def filterfn(fctx, hunks):
796 def filterfn(fctx, hunks):
794 lineranges = fctxlineranges.get(fctx.path())
797 lineranges = fctxlineranges.get(fctx.path())
795 if lineranges is not None:
798 if lineranges is not None:
796 for hr, lines in hunks:
799 for hr, lines in hunks:
797 if hr is None: # binary
800 if hr is None: # binary
798 yield hr, lines
801 yield hr, lines
799 continue
802 continue
800 if any(mdiff.hunkinrange(hr[2:], lr)
803 if any(mdiff.hunkinrange(hr[2:], lr)
801 for lr in lineranges):
804 for lr in lineranges):
802 yield hr, lines
805 yield hr, lines
803 else:
806 else:
804 for hunk in hunks:
807 for hunk in hunks:
805 yield hunk
808 yield hunk
806
809
807 return filterfn
810 return filterfn
808
811
809 def filematcher(ctx):
812 def filematcher(ctx):
810 files = list(linerangesbyrev.get(ctx.rev(), []))
813 files = list(linerangesbyrev.get(ctx.rev(), []))
811 return scmutil.matchfiles(repo, files)
814 return scmutil.matchfiles(repo, files)
812
815
813 revs = sorted(linerangesbyrev, reverse=True)
816 revs = sorted(linerangesbyrev, reverse=True)
814
817
815 differ = changesetdiffer()
818 differ = changesetdiffer()
816 differ._makefilematcher = filematcher
819 differ._makefilematcher = filematcher
817 differ._makehunksfilter = hunksfilter
820 differ._makehunksfilter = hunksfilter
818 return revs, differ
821 return revs, differ
819
822
820 def _graphnodeformatter(ui, displayer):
823 def _graphnodeformatter(ui, displayer):
821 spec = ui.config('ui', 'graphnodetemplate')
824 spec = ui.config('ui', 'graphnodetemplate')
822 if not spec:
825 if not spec:
823 return templatekw.getgraphnode # fast path for "{graphnode}"
826 return templatekw.getgraphnode # fast path for "{graphnode}"
824
827
825 spec = templater.unquotestring(spec)
828 spec = templater.unquotestring(spec)
826 if isinstance(displayer, changesettemplater):
829 if isinstance(displayer, changesettemplater):
827 # reuse cache of slow templates
830 # reuse cache of slow templates
828 tres = displayer._tresources
831 tres = displayer._tresources
829 else:
832 else:
830 tres = formatter.templateresources(ui)
833 tres = formatter.templateresources(ui)
831 templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
834 templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
832 resources=tres)
835 resources=tres)
833 def formatnode(repo, ctx):
836 def formatnode(repo, ctx):
834 props = {'ctx': ctx, 'repo': repo}
837 props = {'ctx': ctx, 'repo': repo}
835 return templ.renderdefault(props)
838 return templ.renderdefault(props)
836 return formatnode
839 return formatnode
837
840
838 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None):
841 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None):
839 props = props or {}
842 props = props or {}
840 formatnode = _graphnodeformatter(ui, displayer)
843 formatnode = _graphnodeformatter(ui, displayer)
841 state = graphmod.asciistate()
844 state = graphmod.asciistate()
842 styles = state['styles']
845 styles = state['styles']
843
846
844 # only set graph styling if HGPLAIN is not set.
847 # only set graph styling if HGPLAIN is not set.
845 if ui.plain('graph'):
848 if ui.plain('graph'):
846 # set all edge styles to |, the default pre-3.8 behaviour
849 # set all edge styles to |, the default pre-3.8 behaviour
847 styles.update(dict.fromkeys(styles, '|'))
850 styles.update(dict.fromkeys(styles, '|'))
848 else:
851 else:
849 edgetypes = {
852 edgetypes = {
850 'parent': graphmod.PARENT,
853 'parent': graphmod.PARENT,
851 'grandparent': graphmod.GRANDPARENT,
854 'grandparent': graphmod.GRANDPARENT,
852 'missing': graphmod.MISSINGPARENT
855 'missing': graphmod.MISSINGPARENT
853 }
856 }
854 for name, key in edgetypes.items():
857 for name, key in edgetypes.items():
855 # experimental config: experimental.graphstyle.*
858 # experimental config: experimental.graphstyle.*
856 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
859 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
857 styles[key])
860 styles[key])
858 if not styles[key]:
861 if not styles[key]:
859 styles[key] = None
862 styles[key] = None
860
863
861 # experimental config: experimental.graphshorten
864 # experimental config: experimental.graphshorten
862 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
865 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
863
866
864 for rev, type, ctx, parents in dag:
867 for rev, type, ctx, parents in dag:
865 char = formatnode(repo, ctx)
868 char = formatnode(repo, ctx)
866 copies = None
869 copies = None
867 if getrenamed and ctx.rev():
870 if getrenamed and ctx.rev():
868 copies = []
871 copies = []
869 for fn in ctx.files():
872 for fn in ctx.files():
870 rename = getrenamed(fn, ctx.rev())
873 rename = getrenamed(fn, ctx.rev())
871 if rename:
874 if rename:
872 copies.append((fn, rename))
875 copies.append((fn, rename))
873 edges = edgefn(type, char, state, rev, parents)
876 edges = edgefn(type, char, state, rev, parents)
874 firstedge = next(edges)
877 firstedge = next(edges)
875 width = firstedge[2]
878 width = firstedge[2]
876 displayer.show(ctx, copies=copies,
879 displayer.show(ctx, copies=copies,
877 graphwidth=width, **pycompat.strkwargs(props))
880 graphwidth=width, **pycompat.strkwargs(props))
878 lines = displayer.hunk.pop(rev).split('\n')
881 lines = displayer.hunk.pop(rev).split('\n')
879 if not lines[-1]:
882 if not lines[-1]:
880 del lines[-1]
883 del lines[-1]
881 displayer.flush(ctx)
884 displayer.flush(ctx)
882 for type, char, width, coldata in itertools.chain([firstedge], edges):
885 for type, char, width, coldata in itertools.chain([firstedge], edges):
883 graphmod.ascii(ui, state, type, char, lines, coldata)
886 graphmod.ascii(ui, state, type, char, lines, coldata)
884 lines = []
887 lines = []
885 displayer.close()
888 displayer.close()
886
889
887 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
890 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
888 revdag = graphmod.dagwalker(repo, revs)
891 revdag = graphmod.dagwalker(repo, revs)
889 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
892 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
890
893
891 def displayrevs(ui, repo, revs, displayer, getrenamed):
894 def displayrevs(ui, repo, revs, displayer, getrenamed):
892 for rev in revs:
895 for rev in revs:
893 ctx = repo[rev]
896 ctx = repo[rev]
894 copies = None
897 copies = None
895 if getrenamed is not None and rev:
898 if getrenamed is not None and rev:
896 copies = []
899 copies = []
897 for fn in ctx.files():
900 for fn in ctx.files():
898 rename = getrenamed(fn, rev)
901 rename = getrenamed(fn, rev)
899 if rename:
902 if rename:
900 copies.append((fn, rename))
903 copies.append((fn, rename))
901 displayer.show(ctx, copies=copies)
904 displayer.show(ctx, copies=copies)
902 displayer.flush(ctx)
905 displayer.flush(ctx)
903 displayer.close()
906 displayer.close()
904
907
905 def checkunsupportedgraphflags(pats, opts):
908 def checkunsupportedgraphflags(pats, opts):
906 for op in ["newest_first"]:
909 for op in ["newest_first"]:
907 if op in opts and opts[op]:
910 if op in opts and opts[op]:
908 raise error.Abort(_("-G/--graph option is incompatible with --%s")
911 raise error.Abort(_("-G/--graph option is incompatible with --%s")
909 % op.replace("_", "-"))
912 % op.replace("_", "-"))
910
913
911 def graphrevs(repo, nodes, opts):
914 def graphrevs(repo, nodes, opts):
912 limit = getlimit(opts)
915 limit = getlimit(opts)
913 nodes.reverse()
916 nodes.reverse()
914 if limit is not None:
917 if limit is not None:
915 nodes = nodes[:limit]
918 nodes = nodes[:limit]
916 return graphmod.nodes(repo, nodes)
919 return graphmod.nodes(repo, nodes)
@@ -1,2857 +1,2861 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 diffhelper,
30 diffhelper,
31 diffutil,
31 diffutil,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 mdiff,
35 mdiff,
36 pathutil,
36 pathutil,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 similar,
39 similar,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43 from .utils import (
43 from .utils import (
44 dateutil,
44 dateutil,
45 procutil,
45 procutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 stringio = util.stringio
49 stringio = util.stringio
50
50
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
53 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
53 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
54 b'[^ \ta-zA-Z0-9_\x80-\xff])')
54 b'[^ \ta-zA-Z0-9_\x80-\xff])')
55
55
56 PatchError = error.PatchError
56 PatchError = error.PatchError
57
57
58 # public functions
58 # public functions
59
59
def split(stream):
    '''return an iterator of individual patches from a stream

    The input may be a bare patch, an "hg export" bundle, an mbox, a MIME
    message, or header-prefixed text; the right splitting strategy is
    chosen by peeking at the first lines.
    '''
    def isheader(line, inheader):
        # Heuristic for an RFC-2822-style "Key: value" header line.
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # Wrap accumulated lines in a file-like object.
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # Split on "# HG changeset patch" markers (hg export format).
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # Split an mbox on "From " separators, recursing into each message
        # (the first line of each message is the separator and is dropped).
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # Let the email package walk MIME parts; yield patch-bearing ones.
        def msgfp(m):
            # Flatten a message object back into a file-like object.
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Split whenever a new header-looking section begins.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: the whole input is a single plain patch.
        yield chunk(cur)

    class fiter(object):
        # Adapter giving an iterator interface to objects that only
        # provide readline() (e.g. http responses).
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Peek line by line until one strategy can be committed to; the lines
    # read so far are carried along in 'cur'.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
188
188
## Some facility for extensible patch parsing:
# List of pairs ("header to match", "data key"); patch-extraction code
# consults this when reading "# <Header> <value>" lines of an hg patch.
patchheadermap = [
    ('Date', 'date'),
    ('Branch', 'branch'),
    ('Node ID', 'nodeid'),
]
195
195
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    handle, patchpath = pycompat.mkstemp(prefix='hg-patch-')
    patchfile = os.fdopen(handle, r'wb')
    try:
        yield _extract(ui, fileobj, patchpath, patchfile)
    finally:
        # Always remove the temporary patch file once the caller is done,
        # even if the body of the with-block raised.
        patchfile.close()
        os.unlink(patchpath)
221
221
def _extract(ui, fileobj, tmpname, tmpfp):
    """Parse a patch or patch-bearing email from fileobj.

    Writes the raw diff payload(s) to the open file object ``tmpfp``
    (path ``tmpname``) and returns a dict of extracted metadata
    (message/user/date/branch/nodeid/p1/p2, plus 'filename' when at
    least one diff was found).
    """

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data['user']:
        # Not an email, restore parsed headers if any
        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                            for h in msg.items()) + '\n'

    # should try to parse msg['Date']
    parents = []

    if subject:
        if subject.startswith('[PATCH'):
            # Strip a leading "[PATCH ...]" tag from the subject.
            pend = subject.find(']')
            if pend >= 0:
                subject = subject[pend + 1:].lstrip()
        subject = re.sub(br'\n[ \t]+', ' ', subject)
        ui.debug('Subject: %s\n' % subject)
    if data['user']:
        ui.debug('From: %s\n' % data['user'])
    diffs_seen = 0
    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
    message = ''
    # Walk all MIME parts; diff-bearing parts are appended to tmpfp,
    # plain-text parts before the first diff extend the commit message.
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug('Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug('found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            # Everything before the diff start is candidate message text,
            # possibly containing an "hg export" style header.
            for line in payload[:m.start(0)].splitlines():
                if line.startswith('# HG changeset patch') and not hgpatch:
                    ui.debug('patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith('# User '):
                        data['user'] = line[7:]
                        ui.debug('From: %s\n' % data['user'])
                    elif line.startswith("# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith("# "):
                        # Generic "# Header value" lines declared in
                        # patchheadermap (Date/Branch/Node ID).
                        for header, key in patchheadermap:
                            prefix = '# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix):]
                    else:
                        # First non-"#" line ends the hg patch header.
                        hgpatchheader = False
                elif line == '---':
                    # Conventional "---" separator: ignore trailing text.
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write('\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith('\n'):
                    tmpfp.write('\n')
        elif not diffs_seen and message and content_type == 'text/plain':
            message += '\n' + payload

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname

    return data
321
321
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY.  'path' is the patched file path.  'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise.  If
    the file mode changed, 'mode' is a (islink, isexec) pair where
    'islink' is true for symlinks and 'isexec' for executable files;
    otherwise 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Decode a raw octal st_mode into the (islink, isexec) pair.
        # The stored members are the raw masked ints, used only for truth.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # Return an independent duplicate of this metadata record.
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # Does 'afile' (the "---" side of a hunk) match this operation?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # Does 'bfile' (the "+++" side of a hunk) match this operation?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
367
367
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Scan the stream for "diff --git" headers and the extended header
    # lines that follow them, accumulating one patchmeta per file.
    current = None
    gitpatches = []
    for rawline in lr:
        line = rawline.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if current:
                    gitpatches.append(current)
                current = patchmeta(m.group(2))
        elif current:
            if line.startswith('--- '):
                # Start of the unified-diff body: header section is over.
                gitpatches.append(current)
                current = None
                continue
            if line.startswith('rename from '):
                current.op = 'RENAME'
                current.oldpath = line[12:]
            elif line.startswith('rename to '):
                current.path = line[10:]
            elif line.startswith('copy from '):
                current.op = 'COPY'
                current.oldpath = line[10:]
            elif line.startswith('copy to '):
                current.path = line[8:]
            elif line.startswith('deleted file'):
                current.op = 'DELETE'
            elif line.startswith('new file mode '):
                current.op = 'ADD'
                current.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                current.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                current.binary = True
    if current:
        gitpatches.append(current)

    return gitpatches
411
411
class linereader(object):
    """Wrap a file object so lines can be pushed back into the input."""
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # Queue 'line' to be returned by upcoming readline() calls;
        # None is silently ignored for caller convenience.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # Drain pushed-back lines (FIFO order) before touching the stream.
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # Yield lines until readline() returns the empty string (EOF).
        return iter(self.readline, '')
431
431
class abstractbackend(object):
    """Interface implemented by targets that patches are applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple.  Data is None if the file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode.

        mode is an (islink, isexec) tuple.  If data is None the file
        content is left unchanged.  If the file is modified after being
        copied, copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname.

        failed is the number of hunks which failed to apply and total
        the total number of hunks for this file.  Default is a no-op.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
465
465
class fsbackend(abstractbackend):
    """Backend applying patches directly to the filesystem under basedir."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # For symlinks the "data" is the link target.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as err:
            # A missing file simply isn't executable; anything else is real.
            if err.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Content unchanged: only the flags need updating.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        # Rejects go next to the target file with a ".rej" suffix.
        rejname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, rejname))
        rejfile = self.opener(rejname, 'w')
        rejfile.writelines(lines)
        rejfile.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
515
515
class workingbackend(fsbackend):
    """fsbackend that also keeps the dirstate (add/remove/copy) in sync."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to patch files present on disk but unknown to the dirstate.
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        touched = set(self.changed)
        for source, dest in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, source, dest)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate; it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    touched.discard(f)
        if touched:
            scmutil.marktouched(self.repo, touched, self.similarity)
        return sorted(self.changed)
559
559
class filestore(object):
    """Map filenames to (data, mode, copied), spilling large data to disk.

    Payloads are kept in memory until roughly ``maxsize`` bytes have
    accumulated; subsequent entries are written to a temporary directory
    instead.  A negative ``maxsize`` keeps everything in memory.
    """

    def __init__(self, maxsize=None):
        self.opener = None          # lazily-created vfs for spilled files
        self.files = {}             # fname -> (ondiskname, mode, copied)
        self.created = 0            # counter used to name spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0               # bytes currently held in memory
        self.data = {}              # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # Still under budget (or unlimited): keep it in memory.
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = pycompat.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            spillname = '%d' % self.created
            self.opener.write(spillname, data)
            self.created += 1
            self.files[fname] = (spillname, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        spillname, mode, copied = self.files[fname]
        return self.opener.read(spillname), mode, copied

    def close(self):
        # Remove the spill directory, if one was ever created.
        if self.opener:
            shutil.rmtree(self.opener.base)
596
596
class repobackend(abstractbackend):
    """Patch backend that applies changes against a changectx.

    Patched contents are written into ``store`` (a filestore) rather than
    the working directory; ``changed``/``removed``/``copied`` record what
    happened so the caller can build a new commit afterwards.
    """
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx        # base context the patch is applied on top of
        self.store = store    # receives patched file contents
        self.changed = set()  # files added or modified by the patch
        self.removed = set()  # files deleted by the patch
        self.copied = {}      # destination -> copy source

    def _checkknown(self, fname):
        # Refuse to touch files that do not exist in the base context.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) or (None, None) if missing."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        """Store patched contents; ``data=None`` keeps the base data
        (mode-only change)."""
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        # Deletion of an untracked file is an error, not a no-op.
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files the patch touched."""
        return self.changed | self.removed
638
638
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range markers: "--- start,len ----" / "*** start,len ****"
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# accepted values for the eolmode argument of the patching code below
eolmodes = ['strict', 'crlf', 'lf', 'auto']
643
643
class patchfile(object):
    """Per-file state for applying the hunks of a patch.

    Current contents are read through ``backend`` (or ``store`` when the
    file is a copy/rename source), hunks are applied in memory against
    ``self.lines``, and the result plus any rejected hunks are written
    back by close().
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp is a parsed git-style patch header (gitpatch) for one file.
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # line ending detected from the file's first line
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # Copy/rename sources come from the store (already-patched data),
        # everything else from the backend.
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}         # line content -> positions, for fuzzy matching
        self.dirty = 0
        self.offset = 0        # cumulative line-count drift from earlier hunks
        self.skew = 0          # last observed offset between patch and file
        self.rej = []          # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write ``lines`` through the backend, honoring self.eolmode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l.endswith('\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit 'patching file X' once per file (warn or verbose note)."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk ``h``; return 0 on clean apply, the fuzz level when
        fuzz was needed, or -1 when the hunk was rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched lines and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
859
859
class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True if this header describes a git binary diff."""
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a user-oriented summary of this header to ``fp``."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum(max(h.added, h.removed) for h in self.hunks)))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        """True if this file must be selected all-or-nothing."""
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        """Return the file name(s) mentioned by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        for line in self.header:
            if self.newfile_re.match(line):
                return True
        return False

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        for line in self.header:
            if self.special_re.match(line):
                return True
        return False
931
931
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Keep at most maxcontext context lines: the trailing ones for
            # the leading context (reverse=True), the leading ones for the
            # trailing context.  Returns (number trimmed, trimmed lines).
            if maxcontext is None:
                return 0, lines
            surplus = len(lines) - maxcontext
            if surplus <= 0:
                return 0, lines
            if reverse:
                return surplus, lines[surplus:]
            return surplus, lines[:maxcontext]

        self.header = header
        trimmedbefore, self.before = trimcontext(before, True)
        # Shift the start lines past any trimmed leading context.
        self.fromline = fromline + trimmedbefore
        self.toline = toline + trimmedbefore
        _trimmedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line.startswith('+'))
        removed = sum(1 for line in hunk if line.startswith('-'))
        return added, removed

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = ['%s%s' % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        """Emit this hunk in unified-diff format to ``fp``."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1010
1010
def getmessages():
    """Return the prompt catalog used by interactive hunk filtering.

    Maps 'multiple'/'single' (per-hunk prompt templates) and 'help'
    (promptchoice response strings) to per-operation ('apply', 'discard',
    'record') translated messages.  Built at call time so translations
    are resolved with the active locale.
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1056
1056
1057 def filterpatch(ui, headers, operation=None):
1057 def filterpatch(ui, headers, operation=None):
1058 """Interactively filter patch chunks into applied-only chunks"""
1058 """Interactively filter patch chunks into applied-only chunks"""
1059 messages = getmessages()
1059 messages = getmessages()
1060
1060
1061 if operation is None:
1061 if operation is None:
1062 operation = 'record'
1062 operation = 'record'
1063
1063
1064 def prompt(skipfile, skipall, query, chunk):
1064 def prompt(skipfile, skipall, query, chunk):
1065 """prompt query, and process base inputs
1065 """prompt query, and process base inputs
1066
1066
1067 - y/n for the rest of file
1067 - y/n for the rest of file
1068 - y/n for the rest
1068 - y/n for the rest
1069 - ? (help)
1069 - ? (help)
1070 - q (quit)
1070 - q (quit)
1071
1071
1072 Return True/False and possibly updated skipfile and skipall.
1072 Return True/False and possibly updated skipfile and skipall.
1073 """
1073 """
1074 newpatches = None
1074 newpatches = None
1075 if skipall is not None:
1075 if skipall is not None:
1076 return skipall, skipfile, skipall, newpatches
1076 return skipall, skipfile, skipall, newpatches
1077 if skipfile is not None:
1077 if skipfile is not None:
1078 return skipfile, skipfile, skipall, newpatches
1078 return skipfile, skipfile, skipall, newpatches
1079 while True:
1079 while True:
1080 resps = messages['help'][operation]
1080 resps = messages['help'][operation]
1081 r = ui.promptchoice("%s %s" % (query, resps))
1081 r = ui.promptchoice("%s %s" % (query, resps))
1082 ui.write("\n")
1082 ui.write("\n")
1083 if r == 8: # ?
1083 if r == 8: # ?
1084 for c, t in ui.extractchoices(resps)[1]:
1084 for c, t in ui.extractchoices(resps)[1]:
1085 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1085 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1086 continue
1086 continue
1087 elif r == 0: # yes
1087 elif r == 0: # yes
1088 ret = True
1088 ret = True
1089 elif r == 1: # no
1089 elif r == 1: # no
1090 ret = False
1090 ret = False
1091 elif r == 2: # Edit patch
1091 elif r == 2: # Edit patch
1092 if chunk is None:
1092 if chunk is None:
1093 ui.write(_('cannot edit patch for whole file'))
1093 ui.write(_('cannot edit patch for whole file'))
1094 ui.write("\n")
1094 ui.write("\n")
1095 continue
1095 continue
1096 if chunk.header.binary():
1096 if chunk.header.binary():
1097 ui.write(_('cannot edit patch for binary file'))
1097 ui.write(_('cannot edit patch for binary file'))
1098 ui.write("\n")
1098 ui.write("\n")
1099 continue
1099 continue
1100 # Patch comment based on the Git one (based on comment at end of
1100 # Patch comment based on the Git one (based on comment at end of
1101 # https://mercurial-scm.org/wiki/RecordExtension)
1101 # https://mercurial-scm.org/wiki/RecordExtension)
1102 phelp = '---' + _("""
1102 phelp = '---' + _("""
1103 To remove '-' lines, make them ' ' lines (context).
1103 To remove '-' lines, make them ' ' lines (context).
1104 To remove '+' lines, delete them.
1104 To remove '+' lines, delete them.
1105 Lines starting with # will be removed from the patch.
1105 Lines starting with # will be removed from the patch.
1106
1106
1107 If the patch applies cleanly, the edited hunk will immediately be
1107 If the patch applies cleanly, the edited hunk will immediately be
1108 added to the record list. If it does not apply cleanly, a rejects
1108 added to the record list. If it does not apply cleanly, a rejects
1109 file will be generated: you can use that when you try again. If
1109 file will be generated: you can use that when you try again. If
1110 all lines of the hunk are removed, then the edit is aborted and
1110 all lines of the hunk are removed, then the edit is aborted and
1111 the hunk is left unchanged.
1111 the hunk is left unchanged.
1112 """)
1112 """)
1113 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1113 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1114 suffix=".diff")
1114 suffix=".diff")
1115 ncpatchfp = None
1115 ncpatchfp = None
1116 try:
1116 try:
1117 # Write the initial patch
1117 # Write the initial patch
1118 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1118 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1119 chunk.header.write(f)
1119 chunk.header.write(f)
1120 chunk.write(f)
1120 chunk.write(f)
1121 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1121 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1122 f.close()
1122 f.close()
1123 # Start the editor and wait for it to complete
1123 # Start the editor and wait for it to complete
1124 editor = ui.geteditor()
1124 editor = ui.geteditor()
1125 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1125 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1126 environ={'HGUSER': ui.username()},
1126 environ={'HGUSER': ui.username()},
1127 blockedtag='filterpatch')
1127 blockedtag='filterpatch')
1128 if ret != 0:
1128 if ret != 0:
1129 ui.warn(_("editor exited with exit code %d\n") % ret)
1129 ui.warn(_("editor exited with exit code %d\n") % ret)
1130 continue
1130 continue
1131 # Remove comment lines
1131 # Remove comment lines
1132 patchfp = open(patchfn, r'rb')
1132 patchfp = open(patchfn, r'rb')
1133 ncpatchfp = stringio()
1133 ncpatchfp = stringio()
1134 for line in util.iterfile(patchfp):
1134 for line in util.iterfile(patchfp):
1135 line = util.fromnativeeol(line)
1135 line = util.fromnativeeol(line)
1136 if not line.startswith('#'):
1136 if not line.startswith('#'):
1137 ncpatchfp.write(line)
1137 ncpatchfp.write(line)
1138 patchfp.close()
1138 patchfp.close()
1139 ncpatchfp.seek(0)
1139 ncpatchfp.seek(0)
1140 newpatches = parsepatch(ncpatchfp)
1140 newpatches = parsepatch(ncpatchfp)
1141 finally:
1141 finally:
1142 os.unlink(patchfn)
1142 os.unlink(patchfn)
1143 del ncpatchfp
1143 del ncpatchfp
1144 # Signal that the chunk shouldn't be applied as-is, but
1144 # Signal that the chunk shouldn't be applied as-is, but
1145 # provide the new patch to be used instead.
1145 # provide the new patch to be used instead.
1146 ret = False
1146 ret = False
1147 elif r == 3: # Skip
1147 elif r == 3: # Skip
1148 ret = skipfile = False
1148 ret = skipfile = False
1149 elif r == 4: # file (Record remaining)
1149 elif r == 4: # file (Record remaining)
1150 ret = skipfile = True
1150 ret = skipfile = True
1151 elif r == 5: # done, skip remaining
1151 elif r == 5: # done, skip remaining
1152 ret = skipall = False
1152 ret = skipall = False
1153 elif r == 6: # all
1153 elif r == 6: # all
1154 ret = skipall = True
1154 ret = skipall = True
1155 elif r == 7: # quit
1155 elif r == 7: # quit
1156 raise error.Abort(_('user quit'))
1156 raise error.Abort(_('user quit'))
1157 return ret, skipfile, skipall, newpatches
1157 return ret, skipfile, skipall, newpatches
1158
1158
1159 seen = set()
1159 seen = set()
1160 applied = {} # 'filename' -> [] of chunks
1160 applied = {} # 'filename' -> [] of chunks
1161 skipfile, skipall = None, None
1161 skipfile, skipall = None, None
1162 pos, total = 1, sum(len(h.hunks) for h in headers)
1162 pos, total = 1, sum(len(h.hunks) for h in headers)
1163 for h in headers:
1163 for h in headers:
1164 pos += len(h.hunks)
1164 pos += len(h.hunks)
1165 skipfile = None
1165 skipfile = None
1166 fixoffset = 0
1166 fixoffset = 0
1167 hdr = ''.join(h.header)
1167 hdr = ''.join(h.header)
1168 if hdr in seen:
1168 if hdr in seen:
1169 continue
1169 continue
1170 seen.add(hdr)
1170 seen.add(hdr)
1171 if skipall is None:
1171 if skipall is None:
1172 h.pretty(ui)
1172 h.pretty(ui)
1173 msg = (_('examine changes to %s?') %
1173 msg = (_('examine changes to %s?') %
1174 _(' and ').join("'%s'" % f for f in h.files()))
1174 _(' and ').join("'%s'" % f for f in h.files()))
1175 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1175 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1176 if not r:
1176 if not r:
1177 continue
1177 continue
1178 applied[h.filename()] = [h]
1178 applied[h.filename()] = [h]
1179 if h.allhunks():
1179 if h.allhunks():
1180 applied[h.filename()] += h.hunks
1180 applied[h.filename()] += h.hunks
1181 continue
1181 continue
1182 for i, chunk in enumerate(h.hunks):
1182 for i, chunk in enumerate(h.hunks):
1183 if skipfile is None and skipall is None:
1183 if skipfile is None and skipall is None:
1184 chunk.pretty(ui)
1184 chunk.pretty(ui)
1185 if total == 1:
1185 if total == 1:
1186 msg = messages['single'][operation] % chunk.filename()
1186 msg = messages['single'][operation] % chunk.filename()
1187 else:
1187 else:
1188 idx = pos - len(h.hunks) + i
1188 idx = pos - len(h.hunks) + i
1189 msg = messages['multiple'][operation] % (idx, total,
1189 msg = messages['multiple'][operation] % (idx, total,
1190 chunk.filename())
1190 chunk.filename())
1191 r, skipfile, skipall, newpatches = prompt(skipfile,
1191 r, skipfile, skipall, newpatches = prompt(skipfile,
1192 skipall, msg, chunk)
1192 skipall, msg, chunk)
1193 if r:
1193 if r:
1194 if fixoffset:
1194 if fixoffset:
1195 chunk = copy.copy(chunk)
1195 chunk = copy.copy(chunk)
1196 chunk.toline += fixoffset
1196 chunk.toline += fixoffset
1197 applied[chunk.filename()].append(chunk)
1197 applied[chunk.filename()].append(chunk)
1198 elif newpatches is not None:
1198 elif newpatches is not None:
1199 for newpatch in newpatches:
1199 for newpatch in newpatches:
1200 for newhunk in newpatch.hunks:
1200 for newhunk in newpatch.hunks:
1201 if fixoffset:
1201 if fixoffset:
1202 newhunk.toline += fixoffset
1202 newhunk.toline += fixoffset
1203 applied[newhunk.filename()].append(newhunk)
1203 applied[newhunk.filename()].append(newhunk)
1204 else:
1204 else:
1205 fixoffset += chunk.removed - chunk.added
1205 fixoffset += chunk.removed - chunk.added
1206 return (sum([h for h in applied.itervalues()
1206 return (sum([h for h in applied.itervalues()
1207 if h[0].special() or len(h) > 1], []), {})
1207 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single hunk of a text patch, in unified or context format.

    Parses the hunk body from a line reader (``lr``) into:
      - ``self.a``: old-side lines, each prefixed with '-' or ' '
      - ``self.b``: new-side lines (context lines unprefixed in ``b``)
      - ``self.hunk``: the raw hunk lines, starting with the '@@' desc line
      - ``starta``/``lena`` and ``startb``/``lenb``: the old/new ranges
    """

    def __init__(self, desc, num, lr, context):
        # desc: the '@@ ...' (or context-format '***') range line
        # num: 1-based hunk number, used only in error messages
        # lr: line reader to parse from; None builds an empty shell
        # context: truthy when the input is in context-diff format
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # convert CRLF endings to bare LF, leave other lines untouched
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        # (passing lr=None skips parsing; fields are copied over below)
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # Parse '@@ -starta,lena +startb,lenb @@'; lengths default to 1
        # when omitted (the ',len' part is optional in unified diffs).
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
                                self.a, self.b)
        except error.ParseError as e:
            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # Context format: '*** starta,aend ****' old block, then
        # '--- startb,bend ----' new block; self.hunk is rebuilt in
        # unified form as we go.
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side block
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # '\ No newline at end of file' marker: strip the trailing
            # newline from the previous line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new-side block, merging it into self.hunk so that the
        # result interleaves like a unified hunk
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE: upstream message says 'old text line' here too,
                # even though this is the new-side block
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance hunki past matching/removed lines; insert added or
            # changed context lines at the right position
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume a trailing '\ No newline at end of file' marker if
        # present; otherwise push the peeked line back.
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides contain exactly the declared number of lines
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        # Return (old, oldstart, new, newstart) with up to ``fuzz`` context
        # lines trimmed from the ends, and starts adjusted accordingly.
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1427
1427
class binhunk(object):
    'A binary patch file.'
    # Parses a git-style binary patch ('literal'/'delta' + base85 lines),
    # decompresses the payload with zlib, and validates its length.

    def __init__(self, lr, fname):
        # lr: line reader positioned after the 'GIT binary patch' line
        # fname: patched file name, used only in error messages
        self.text = None
        self.delta = False          # True for 'delta' (bindelta) payloads
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # True once _read() decoded the payload successfully
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list of bytes.

        For delta payloads, ``lines`` (the old content) is joined and the
        binary delta is applied to it; otherwise the literal payload is
        returned as-is.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one line, record the raw form in ``hunk``, return it
            # without the trailing newline
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        # find the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count of this line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52 (git base85 framing)
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # declared size in the header must match the decompressed payload
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1482
1482
def parsefilename(str):
    """Extract the file name from a '---'/'+++' patch header line.

    The four-character marker prefix ('--- ' or '+++ ') is dropped,
    trailing newline characters are removed, and anything after the
    first tab — or, if there is no tab, the first space — is discarded.
    """
    # --- filename \t|space stuff
    candidate = str[4:].rstrip('\r\n')
    # prefer a tab separator; fall back to a space
    for separator in ('\t', ' '):
        cut = candidate.find(separator)
        if cut >= 0:
            return candidate[:cut]
    return candidate
1492
1492
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # Headers have no reversehunk() method and pass through untouched;
    # hunks that know how to reverse themselves are replaced by their
    # reversed counterpart.
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1555
1555
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current position in the old (from) and new (to) file
            self.fromline = 0
            self.toline = 0
            # section name from the hunk range line, if any
            self.proc = ''
            # header currently being filled in
            self.header = None
            # trailing context of the previous hunk / leading context buffer
            self.context = []
            self.before = []
            # lines of the hunk being accumulated
            self.hunk = []
            # all completed headers, returned by finished()
            self.headers = []

        def addrange(self, limits):
            # a new '@@' range line: flush any pending hunk, then record
            # the new from/to starting lines
            self.addcontext([])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush the accumulated hunk (if any) into the current header,
            # using ``context`` as its trailing context
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context,
                               maxcontext)
                self.header.hunks.append(h)
                # advance file positions past the leading context and the
                # hunk's removed/added lines
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen just before this hunk becomes its leading context
            if self.context:
                self.before = self.context
                self.context = []
            if self.hunk:
                self.addcontext([])
            self.hunk = hunk

        def newfile(self, hdr):
            # start a new file header, flushing any pending hunk first
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the final pending hunk and return all headers
            self.addcontext([])
            return self.headers

        # legal state transitions and the handler for each; the handlers
        # are unbound here, so the driver below passes the parser instance
        # explicitly as the first argument
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    # concatenate the chunks into a single seekable stream for scanpatch
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            # the transition table has no entry for this state pair
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1682
1682
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    # nothing to strip: only trim trailing whitespace and prepend the prefix
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    cursor = 0
    consumed = 0
    while consumed < strip:
        cursor = path.find('/', cursor)
        if cursor == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - consumed, strip, path))
        cursor += 1
        # a run of consecutive slashes counts as a single path level
        while cursor < end - 1 and path[cursor:cursor + 1] == '/':
            cursor += 1
        consumed += 1
    return path[:cursor].lstrip(), prefix + path[cursor:].rstrip()
1720
1720
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Synthesize a patchmeta record for a plain (non-git) patch hunk.

    Decides which file the hunk targets from the ---/+++ names, whether
    the operation is a creation or a deletion, and returns the patchmeta
    with gp.op set to 'ADD'/'DELETE' accordingly.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1775
1775
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(start, pred):
        """collect lines from lr, starting with *start*, while pred holds"""
        acc = [start]
        for line in iter(lr.readline, ''):
            if pred(line):
                acc.append(line)
            else:
                # not ours: put it back for the outer loop
                lr.push(line)
                break
        return acc

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line.startswith(' '):
            cs = (' ', '\\')
            yield 'context', scanwhile(line, lambda l: l.startswith(cs))
        elif line.startswith(('-', '+')):
            cs = ('-', '+', '\\')
            yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1823
1823
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # seekable input: remember where we are so we can rewind after
        # the metadata scan
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable (e.g. a pipe): buffer the whole remainder in memory
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches
1849
1849
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
      "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
      maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until we know the diff flavor, then True for
    # context diffs and False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of this file: announce the file before the hunk
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (no hunks) preceding this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None,
                               gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining git metadata entries that had no hunks at all
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1944
1944
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk starts with two varint headers (source and result sizes),
    followed by a stream of opcodes: bit 7 set means "copy a range from
    *data*" (bits 0-3 select offset bytes, bits 4-6 select size bytes);
    otherwise the opcode value is a count of literal bytes to insert
    from the delta itself.  Opcode 0 is reserved and rejected.
    """
    def deltahead(binchunk):
        # length of the leading varint: it ends at the first byte with
        # the 0x80 continuation bit clear
        i = 0
        for c in pycompat.bytestr(binchunk):
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # Collect output fragments and join once at the end; repeated
    # `out += fragment` is quadratic in the output size.
    fragments = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy-from-source opcode: decode offset and size one byte
            # at a time, as selected by the low seven flag bits
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            fragments.append(data[offset:offset + size])
        elif cmd != 0:
            # literal insert: cmd is the number of bytes to take from
            # the delta stream itself
            fragments.append(binchunk[i:i + cmd])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(fragments)
2000
2000
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin convenience wrapper: the default patcher is `patchfile`
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
2013
2013
def _canonprefix(repo, prefix):
    """Canonicalize *prefix* against the repo root and the current cwd.

    A non-empty result always ends with '/' so it can be concatenated
    directly in front of patched paths.
    """
    if not prefix:
        return prefix
    prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
    if prefix != '':
        prefix += '/'
    return prefix
2020
2020
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Drive *patcher* over the event stream produced by iterhunks(fp).

    Returns 0 on clean application, 1 if any hunk applied with fuzz,
    and -1 if any hunks were rejected.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip - 1 because git metadata paths lack the a/ b/ prefixes
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the preceding 'file' event failed; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                # git patch: metadata paths need strip/prefix applied
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain patch: synthesize the metadata from the hunk
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (create/delete/copy/mode)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preserve copy/rename sources in the store before they are
            # modified, so later COPY/RENAME metadata can read them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2105
2105
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    cmd = ('%s %s -p%d < %s'
           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
    ui.debug('Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, 'rb')
    try:
        # Initialize before the loop: the external tool may emit a
        # "with fuzz"/"FAILED" line before any "patching file" line,
        # which previously raised NameError on pf/printed_file.
        pf = None
        printed_file = False
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             procutil.explainexit(code))
    return fuzz
2149
2149
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply *patchobj* (a path or a file-like object) through *backend*.

    Updates *files* with every path the backend touched.  Raises
    PatchError if the patch failed to apply; returns True if it applied
    with fuzz, False if it applied cleanly.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be a filename or an already-open file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2176
2176
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2183
2183
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> against revision *ctx*, writing results into *store*
    via a repobackend.  Returns whether the patch applied with fuzz."""
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2188
2188
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a configured external tool takes precedence over the builtin patcher
    patcher = ui.config('ui', 'patch')
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2210
2210
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of repository paths touched by the patch at *patchpath*.

    For renames, both the old and the new path are included.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch: adjust metadata paths (strip - 1 because
                    # they lack the a/ b/ prefixes)
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                       strip, prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2233
2233
class GitDiffRequired(Exception):
    """Raised when a diff cannot be expressed without the git extended
    format (see the upgrade fallback in diffhunks())."""
2236
2236
# Aliases re-exporting the diff-options helpers from diffutil under this
# module's namespace. NOTE(review): 'diffopts' deliberately points at
# diffallopts too (both names bind the same function) — presumably kept for
# backward compatibility with older callers; verify before changing.
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2240
2240
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         copysourcematch=None, hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    if copysourcematch is not None, then copy sources will be filtered by this
    matcher

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, ctx1=ctx1, ctx2=ctx2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
            copysourcematch=copysourcematch):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            # (fixed typo in the assertion message: "unexpectly")
            assert fctx2 is not None, \
                'fctx2 unexpectedly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten the (hunkrange, hunklines) pairs into one text blob
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header when there is content, or when the header alone
        # carries information (e.g. git metadata lines beyond "diff ...")
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2293
2296
def diffhunks(repo, ctx1, ctx2, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None,
              copysourcematch=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    def lrugetfilectx():
        # Small LRU (at most 20 entries) of filelogs keyed by path, so
        # repeated filectx lookups for the same file reuse one filelog.
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # cache hit: move f to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    if not changes:
        changes = ctx1.status(ctx2, match=match)
    # only the first three status entries (modified, added, removed) matter
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        # copy information is only rendered by the git diff format (or when
        # upgrading to it), so don't compute it otherwise
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if copysourcematch:
        # filter out copies where source side isn't inside the matcher
        # (copies.pathcopies() already filtered out the destination)
        copy = {dst: src for dst, src in copy.iteritems()
                if copysourcematch(src)}

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    # iterate over a snapshot since entries may be deleted from 'copy'
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    # warm remote/lazy file contents for everything the diff will touch
    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset))
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        # try a plain diff first; if data would be lost and losedatafn
        # declines the loss, redo the whole diff in git format
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2387
2391
def diffsinglehunk(hunklines):
    """yield tokens for a list of lines in a single hunk"""
    for rawline in hunklines:
        # peel off the line terminator first ("chomp"), then any other
        # trailing whitespace so each part can be labeled separately
        nonewline = rawline.rstrip('\r\n')
        body = nonewline.rstrip()
        if rawline.startswith('-'):
            linelabel = 'diff.deleted'
        elif rawline.startswith('+'):
            linelabel = 'diff.inserted'
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % rawline)
        # split on tab runs so tabs get their own highlight label
        for piece in tabsplitter.findall(body):
            if piece.startswith('\t'):
                yield (piece, 'diff.tab')
            else:
                yield (piece, linelabel)

        trailing = nonewline[len(body):]
        if trailing:
            yield (trailing, 'diff.trailingwhitespace')
        eol = rawline[len(nonewline):]
        if eol:
            yield (eol, '')
2411
2415
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors"""
    # prepare deleted, and inserted content: concatenate the "-" lines into a
    # and the "+" lines into b (the leading marker character is dropped)
    a = ''
    b = ''
    for line in hunklines:
        if line[0:1] == '-':
            a += line[1:]
        elif line[0:1] == '+':
            b += line[1:]
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    # (each word becomes its own "line" by appending a newline)
    aln = [s if s == '\n' else s + '\n' for s in al]
    bln = [s if s == '\n' else s + '\n' for s in bl]
    an = ''.join(aln)
    bn = ''.join(bln)
    # run the diff algorithm, prepare atokens and btokens: word tokens
    # tagged with whether their block differs between the two sides
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        changed = btype == '!'
        for token in mdiff.splitnewlines(''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [('-', 'diff.deleted', atokens),
                                  ('+', 'diff.inserted', btokens)]:
        # each output line starts with the "-"/"+" marker token
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                yield (prefix, label)
                nextisnewline = False
            # special handling line end: split off EOL bytes and trailing
            # spaces so each gets its own label below
            isendofline = token.endswith('\n')
            if isendofline:
                chomp = token[:-1] # chomp
                if chomp.endswith('\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp):]
                token = chomp.rstrip() # detect spaces at the end
                endspaces = chomp[len(token):]
            # scan tabs
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = 'diff.tab'
                else:
                    if changed:
                        currentlabel = label + '.changed'
                    else:
                        currentlabel = label + '.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, 'diff.trailingwhitespace')
                yield (endofline, '')
                nextisnewline = True
2480
2484
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # choose the hunk tokenizer: inline word-level coloring when the caller
    # passed diff opts with worddiff enabled, plain line coloring otherwise
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    # - and + are handled by diffsinglehunk
                   ]
    # True while we are inside a file header (between "diff ..." and "@@")
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []
    def consumehunkbuffer():
        # flush the buffered hunk lines through the chosen tokenizer
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            # track whether this line belongs to a file header section
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and not line.startswith((' ', '+', '-', '@', '\\')):
                    head = True
            diffline = False
            if not head and line and line.startswith(('+', '-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered: collect adjacent +/- lines so the hunk can be
                # tokenized as a unit
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += "\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered: flush any pending hunk, then label this line
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (line[len(stripline):],
                                   'diff.trailingwhitespace')
                        break
                else:
                    yield (line, '')
                if i + 1 < linecount:
                    yield ('\n', '')
    # flush whatever hunk lines remain at end of input
    for token in consumehunkbuffer():
        yield token
2551
2555
def diffui(*args, **kw):
    """Wrap diff() for colored output: yields (output, label) 2-tuples
    suitable for ui.write()."""
    return difflabel(diff, *args, **kw)
2555
2559
2556 def _filepairs(modified, added, removed, copy, opts):
2560 def _filepairs(modified, added, removed, copy, opts):
2557 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2561 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2558 before and f2 is the the name after. For added files, f1 will be None,
2562 before and f2 is the the name after. For added files, f1 will be None,
2559 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2563 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2560 or 'rename' (the latter two only if opts.git is set).'''
2564 or 'rename' (the latter two only if opts.git is set).'''
2561 gone = set()
2565 gone = set()
2562
2566
2563 copyto = dict([(v, k) for k, v in copy.items()])
2567 copyto = dict([(v, k) for k, v in copy.items()])
2564
2568
2565 addedset, removedset = set(added), set(removed)
2569 addedset, removedset = set(added), set(removed)
2566
2570
2567 for f in sorted(modified + added + removed):
2571 for f in sorted(modified + added + removed):
2568 copyop = None
2572 copyop = None
2569 f1, f2 = f, f
2573 f1, f2 = f, f
2570 if f in addedset:
2574 if f in addedset:
2571 f1 = None
2575 f1 = None
2572 if f in copy:
2576 if f in copy:
2573 if opts.git:
2577 if opts.git:
2574 f1 = copy[f]
2578 f1 = copy[f]
2575 if f1 in removedset and f1 not in gone:
2579 if f1 in removedset and f1 not in gone:
2576 copyop = 'rename'
2580 copyop = 'rename'
2577 gone.add(f1)
2581 gone.add(f1)
2578 else:
2582 else:
2579 copyop = 'copy'
2583 copyop = 'copy'
2580 elif f in removedset:
2584 elif f in removedset:
2581 f2 = None
2585 f2 = None
2582 if opts.git:
2586 if opts.git:
2583 # have we already reported a copy above?
2587 # have we already reported a copy above?
2584 if (f in copyto and copyto[f] in addedset
2588 if (f in copyto and copyto[f] in addedset
2585 and copy[copyto[f]] == f):
2589 and copy[copyto[f]] == f):
2586 continue
2590 continue
2587 yield f1, f2, copyop
2591 yield f1, f2, copyop
2588
2592
2589 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2593 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2590 copy, getfilectx, opts, losedatafn, prefix, relroot):
2594 copy, getfilectx, opts, losedatafn, prefix, relroot):
2591 '''given input data, generate a diff and yield it in blocks
2595 '''given input data, generate a diff and yield it in blocks
2592
2596
2593 If generating a diff would lose data like flags or binary data and
2597 If generating a diff would lose data like flags or binary data and
2594 losedatafn is not None, it will be called.
2598 losedatafn is not None, it will be called.
2595
2599
2596 relroot is removed and prefix is added to every path in the diff output.
2600 relroot is removed and prefix is added to every path in the diff output.
2597
2601
2598 If relroot is not empty, this function expects every path in modified,
2602 If relroot is not empty, this function expects every path in modified,
2599 added, removed and copy to start with it.'''
2603 added, removed and copy to start with it.'''
2600
2604
2601 def gitindex(text):
2605 def gitindex(text):
2602 if not text:
2606 if not text:
2603 text = ""
2607 text = ""
2604 l = len(text)
2608 l = len(text)
2605 s = hashlib.sha1('blob %d\0' % l)
2609 s = hashlib.sha1('blob %d\0' % l)
2606 s.update(text)
2610 s.update(text)
2607 return hex(s.digest())
2611 return hex(s.digest())
2608
2612
2609 if opts.noprefix:
2613 if opts.noprefix:
2610 aprefix = bprefix = ''
2614 aprefix = bprefix = ''
2611 else:
2615 else:
2612 aprefix = 'a/'
2616 aprefix = 'a/'
2613 bprefix = 'b/'
2617 bprefix = 'b/'
2614
2618
2615 def diffline(f, revs):
2619 def diffline(f, revs):
2616 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2620 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2617 return 'diff %s %s' % (revinfo, f)
2621 return 'diff %s %s' % (revinfo, f)
2618
2622
2619 def isempty(fctx):
2623 def isempty(fctx):
2620 return fctx is None or fctx.size() == 0
2624 return fctx is None or fctx.size() == 0
2621
2625
2622 date1 = dateutil.datestr(ctx1.date())
2626 date1 = dateutil.datestr(ctx1.date())
2623 date2 = dateutil.datestr(ctx2.date())
2627 date2 = dateutil.datestr(ctx2.date())
2624
2628
2625 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2629 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2626
2630
2627 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2631 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2628 or repo.ui.configbool('devel', 'check-relroot')):
2632 or repo.ui.configbool('devel', 'check-relroot')):
2629 for f in modified + added + removed + list(copy) + list(copy.values()):
2633 for f in modified + added + removed + list(copy) + list(copy.values()):
2630 if f is not None and not f.startswith(relroot):
2634 if f is not None and not f.startswith(relroot):
2631 raise AssertionError(
2635 raise AssertionError(
2632 "file %s doesn't start with relroot %s" % (f, relroot))
2636 "file %s doesn't start with relroot %s" % (f, relroot))
2633
2637
2634 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2638 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2635 content1 = None
2639 content1 = None
2636 content2 = None
2640 content2 = None
2637 fctx1 = None
2641 fctx1 = None
2638 fctx2 = None
2642 fctx2 = None
2639 flag1 = None
2643 flag1 = None
2640 flag2 = None
2644 flag2 = None
2641 if f1:
2645 if f1:
2642 fctx1 = getfilectx(f1, ctx1)
2646 fctx1 = getfilectx(f1, ctx1)
2643 if opts.git or losedatafn:
2647 if opts.git or losedatafn:
2644 flag1 = ctx1.flags(f1)
2648 flag1 = ctx1.flags(f1)
2645 if f2:
2649 if f2:
2646 fctx2 = getfilectx(f2, ctx2)
2650 fctx2 = getfilectx(f2, ctx2)
2647 if opts.git or losedatafn:
2651 if opts.git or losedatafn:
2648 flag2 = ctx2.flags(f2)
2652 flag2 = ctx2.flags(f2)
2649 # if binary is True, output "summary" or "base85", but not "text diff"
2653 # if binary is True, output "summary" or "base85", but not "text diff"
2650 if opts.text:
2654 if opts.text:
2651 binary = False
2655 binary = False
2652 else:
2656 else:
2653 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2657 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2654
2658
2655 if losedatafn and not opts.git:
2659 if losedatafn and not opts.git:
2656 if (binary or
2660 if (binary or
2657 # copy/rename
2661 # copy/rename
2658 f2 in copy or
2662 f2 in copy or
2659 # empty file creation
2663 # empty file creation
2660 (not f1 and isempty(fctx2)) or
2664 (not f1 and isempty(fctx2)) or
2661 # empty file deletion
2665 # empty file deletion
2662 (isempty(fctx1) and not f2) or
2666 (isempty(fctx1) and not f2) or
2663 # create with flags
2667 # create with flags
2664 (not f1 and flag2) or
2668 (not f1 and flag2) or
2665 # change flags
2669 # change flags
2666 (f1 and f2 and flag1 != flag2)):
2670 (f1 and f2 and flag1 != flag2)):
2667 losedatafn(f2 or f1)
2671 losedatafn(f2 or f1)
2668
2672
2669 path1 = f1 or f2
2673 path1 = f1 or f2
2670 path2 = f2 or f1
2674 path2 = f2 or f1
2671 path1 = posixpath.join(prefix, path1[len(relroot):])
2675 path1 = posixpath.join(prefix, path1[len(relroot):])
2672 path2 = posixpath.join(prefix, path2[len(relroot):])
2676 path2 = posixpath.join(prefix, path2[len(relroot):])
2673 header = []
2677 header = []
2674 if opts.git:
2678 if opts.git:
2675 header.append('diff --git %s%s %s%s' %
2679 header.append('diff --git %s%s %s%s' %
2676 (aprefix, path1, bprefix, path2))
2680 (aprefix, path1, bprefix, path2))
2677 if not f1: # added
2681 if not f1: # added
2678 header.append('new file mode %s' % gitmode[flag2])
2682 header.append('new file mode %s' % gitmode[flag2])
2679 elif not f2: # removed
2683 elif not f2: # removed
2680 header.append('deleted file mode %s' % gitmode[flag1])
2684 header.append('deleted file mode %s' % gitmode[flag1])
2681 else: # modified/copied/renamed
2685 else: # modified/copied/renamed
2682 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2686 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2683 if mode1 != mode2:
2687 if mode1 != mode2:
2684 header.append('old mode %s' % mode1)
2688 header.append('old mode %s' % mode1)
2685 header.append('new mode %s' % mode2)
2689 header.append('new mode %s' % mode2)
2686 if copyop is not None:
2690 if copyop is not None:
2687 if opts.showsimilarity:
2691 if opts.showsimilarity:
2688 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2692 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2689 header.append('similarity index %d%%' % sim)
2693 header.append('similarity index %d%%' % sim)
2690 header.append('%s from %s' % (copyop, path1))
2694 header.append('%s from %s' % (copyop, path1))
2691 header.append('%s to %s' % (copyop, path2))
2695 header.append('%s to %s' % (copyop, path2))
2692 elif revs and not repo.ui.quiet:
2696 elif revs and not repo.ui.quiet:
2693 header.append(diffline(path1, revs))
2697 header.append(diffline(path1, revs))
2694
2698
2695 # fctx.is | diffopts | what to | is fctx.data()
2699 # fctx.is | diffopts | what to | is fctx.data()
2696 # binary() | text nobinary git index | output? | outputted?
2700 # binary() | text nobinary git index | output? | outputted?
2697 # ------------------------------------|----------------------------
2701 # ------------------------------------|----------------------------
2698 # yes | no no no * | summary | no
2702 # yes | no no no * | summary | no
2699 # yes | no no yes * | base85 | yes
2703 # yes | no no yes * | base85 | yes
2700 # yes | no yes no * | summary | no
2704 # yes | no yes no * | summary | no
2701 # yes | no yes yes 0 | summary | no
2705 # yes | no yes yes 0 | summary | no
2702 # yes | no yes yes >0 | summary | semi [1]
2706 # yes | no yes yes >0 | summary | semi [1]
2703 # yes | yes * * * | text diff | yes
2707 # yes | yes * * * | text diff | yes
2704 # no | * * * * | text diff | yes
2708 # no | * * * * | text diff | yes
2705 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2709 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2706 if binary and (not opts.git or (opts.git and opts.nobinary and not
2710 if binary and (not opts.git or (opts.git and opts.nobinary and not
2707 opts.index)):
2711 opts.index)):
2708 # fast path: no binary content will be displayed, content1 and
2712 # fast path: no binary content will be displayed, content1 and
2709 # content2 are only used for equivalent test. cmp() could have a
2713 # content2 are only used for equivalent test. cmp() could have a
2710 # fast path.
2714 # fast path.
2711 if fctx1 is not None:
2715 if fctx1 is not None:
2712 content1 = b'\0'
2716 content1 = b'\0'
2713 if fctx2 is not None:
2717 if fctx2 is not None:
2714 if fctx1 is not None and not fctx1.cmp(fctx2):
2718 if fctx1 is not None and not fctx1.cmp(fctx2):
2715 content2 = b'\0' # not different
2719 content2 = b'\0' # not different
2716 else:
2720 else:
2717 content2 = b'\0\0'
2721 content2 = b'\0\0'
2718 else:
2722 else:
2719 # normal path: load contents
2723 # normal path: load contents
2720 if fctx1 is not None:
2724 if fctx1 is not None:
2721 content1 = fctx1.data()
2725 content1 = fctx1.data()
2722 if fctx2 is not None:
2726 if fctx2 is not None:
2723 content2 = fctx2.data()
2727 content2 = fctx2.data()
2724
2728
2725 if binary and opts.git and not opts.nobinary:
2729 if binary and opts.git and not opts.nobinary:
2726 text = mdiff.b85diff(content1, content2)
2730 text = mdiff.b85diff(content1, content2)
2727 if text:
2731 if text:
2728 header.append('index %s..%s' %
2732 header.append('index %s..%s' %
2729 (gitindex(content1), gitindex(content2)))
2733 (gitindex(content1), gitindex(content2)))
2730 hunks = (None, [text]),
2734 hunks = (None, [text]),
2731 else:
2735 else:
2732 if opts.git and opts.index > 0:
2736 if opts.git and opts.index > 0:
2733 flag = flag1
2737 flag = flag1
2734 if flag is None:
2738 if flag is None:
2735 flag = flag2
2739 flag = flag2
2736 header.append('index %s..%s %s' %
2740 header.append('index %s..%s %s' %
2737 (gitindex(content1)[0:opts.index],
2741 (gitindex(content1)[0:opts.index],
2738 gitindex(content2)[0:opts.index],
2742 gitindex(content2)[0:opts.index],
2739 gitmode[flag]))
2743 gitmode[flag]))
2740
2744
2741 uheaders, hunks = mdiff.unidiff(content1, date1,
2745 uheaders, hunks = mdiff.unidiff(content1, date1,
2742 content2, date2,
2746 content2, date2,
2743 path1, path2,
2747 path1, path2,
2744 binary=binary, opts=opts)
2748 binary=binary, opts=opts)
2745 header.extend(uheaders)
2749 header.extend(uheaders)
2746 yield fctx1, fctx2, header, hunks
2750 yield fctx1, fctx2, header, hunks
2747
2751
def diffstatsum(stats):
    '''Collapse per-file diffstat tuples into file-wide maxima and totals.

    ``stats`` yields ``(filename, added, removed, isbinary)`` tuples, as
    produced by ``diffstatdata()``.  Returns a 5-tuple
    ``(maxfile, maxtotal, addtotal, removetotal, binary)`` where
    ``maxfile`` is the widest display width of any filename.
    '''
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, added, removed, isbinary in stats:
        # colwidth measures display columns, not byte length (wide chars)
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        if added + removed > maxtotal:
            maxtotal = added + removed
        addtotal += added
        removetotal += removed
        if isbinary:
            binary = True

    return maxfile, maxtotal, addtotal, removetotal, binary
2758
2762
def diffstatdata(lines):
    '''Parse diff text into per-file statistics.

    Returns a list of ``(filename, adds, removes, isbinary)`` tuples, one
    per file section found in ``lines``.
    '''
    plaindiffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def flush():
        # only record a result once a filename was seen for this section
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader tracks whether we are still inside the per-file header
    # portion of the diff, so header lines beginning with '--' or '++'
    # are not miscounted as removals/additions
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            flush()
            # a new file section begins: reset counters and header state
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = plaindiffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif line.startswith(('GIT binary patch', 'Binary file')):
            isbinary = True
        elif line.startswith('rename from'):
            filename = line[12:]
        elif line.startswith('rename to'):
            filename += ' => %s' % line[10:]
    flush()
    return results
2801
2805
def diffstat(lines, width=80):
    '''Render a diffstat histogram of ``lines``, at most ``width`` columns.

    Each changed file gets one row of the form
    ``" name | count +++---"``; a trailing summary line gives the file,
    insertion and deletion totals.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # leave room for the literal "Bin" marker
        countwidth = max(countwidth, 3)
    # 6 columns are consumed by the fixed " ", " | " and " " separators
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    output = []
    for name, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(name))
        output.append(' %s%s | %*s %s%s\n'
                      % (name, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2839
2843
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # split the trailing +/- graph off the "name | count" part
            prefix, graph = row.rsplit(' ', 1)
            yield (prefix + ' ', '')
            # colorize the run of '+' then the run of '-', in that order
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (row, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now