##// END OF EJS Templates
status: handle more node indicators in buildstatus...
Durham Goode -
r31258:c414e339 default
parent child Browse files
Show More
@@ -1,2116 +1,2117
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 newnodeid,
21 newnodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirnodes,
26 )
27 )
27 from . import (
28 from . import (
28 encoding,
29 encoding,
29 error,
30 error,
30 fileset,
31 fileset,
31 match as matchmod,
32 match as matchmod,
32 mdiff,
33 mdiff,
33 obsolete as obsmod,
34 obsolete as obsmod,
34 patch,
35 patch,
35 phases,
36 phases,
36 repoview,
37 repoview,
37 revlog,
38 revlog,
38 scmutil,
39 scmutil,
39 subrepo,
40 subrepo,
40 util,
41 util,
41 )
42 )
42
43
43 propertycache = util.propertycache
44 propertycache = util.propertycache
44
45
45 nonascii = re.compile(r'[^\x21-\x7f]').search
46 nonascii = re.compile(r'[^\x21-\x7f]').search
46
47
47 class basectx(object):
48 class basectx(object):
48 """A basectx object represents the common logic for its children:
49 """A basectx object represents the common logic for its children:
49 changectx: read-only context that is already present in the repo,
50 changectx: read-only context that is already present in the repo,
50 workingctx: a context that represents the working directory and can
51 workingctx: a context that represents the working directory and can
51 be committed,
52 be committed,
52 memctx: a context that represents changes in-memory and can also
53 memctx: a context that represents changes in-memory and can also
53 be committed."""
54 be committed."""
54 def __new__(cls, repo, changeid='', *args, **kwargs):
55 def __new__(cls, repo, changeid='', *args, **kwargs):
55 if isinstance(changeid, basectx):
56 if isinstance(changeid, basectx):
56 return changeid
57 return changeid
57
58
58 o = super(basectx, cls).__new__(cls)
59 o = super(basectx, cls).__new__(cls)
59
60
60 o._repo = repo
61 o._repo = repo
61 o._rev = nullrev
62 o._rev = nullrev
62 o._node = nullid
63 o._node = nullid
63
64
64 return o
65 return o
65
66
66 def __str__(self):
67 def __str__(self):
67 return short(self.node())
68 return short(self.node())
68
69
69 def __int__(self):
70 def __int__(self):
70 return self.rev()
71 return self.rev()
71
72
72 def __repr__(self):
73 def __repr__(self):
73 return "<%s %s>" % (type(self).__name__, str(self))
74 return "<%s %s>" % (type(self).__name__, str(self))
74
75
75 def __eq__(self, other):
76 def __eq__(self, other):
76 try:
77 try:
77 return type(self) == type(other) and self._rev == other._rev
78 return type(self) == type(other) and self._rev == other._rev
78 except AttributeError:
79 except AttributeError:
79 return False
80 return False
80
81
81 def __ne__(self, other):
82 def __ne__(self, other):
82 return not (self == other)
83 return not (self == other)
83
84
84 def __contains__(self, key):
85 def __contains__(self, key):
85 return key in self._manifest
86 return key in self._manifest
86
87
87 def __getitem__(self, key):
88 def __getitem__(self, key):
88 return self.filectx(key)
89 return self.filectx(key)
89
90
90 def __iter__(self):
91 def __iter__(self):
91 return iter(self._manifest)
92 return iter(self._manifest)
92
93
93 def _manifestmatches(self, match, s):
94 def _manifestmatches(self, match, s):
94 """generate a new manifest filtered by the match argument
95 """generate a new manifest filtered by the match argument
95
96
96 This method is for internal use only and mainly exists to provide an
97 This method is for internal use only and mainly exists to provide an
97 object oriented way for other contexts to customize the manifest
98 object oriented way for other contexts to customize the manifest
98 generation.
99 generation.
99 """
100 """
100 return self.manifest().matches(match)
101 return self.manifest().matches(match)
101
102
102 def _matchstatus(self, other, match):
103 def _matchstatus(self, other, match):
103 """return match.always if match is none
104 """return match.always if match is none
104
105
105 This internal method provides a way for child objects to override the
106 This internal method provides a way for child objects to override the
106 match operator.
107 match operator.
107 """
108 """
108 return match or matchmod.always(self._repo.root, self._repo.getcwd())
109 return match or matchmod.always(self._repo.root, self._repo.getcwd())
109
110
110 def _buildstatus(self, other, s, match, listignored, listclean,
111 def _buildstatus(self, other, s, match, listignored, listclean,
111 listunknown):
112 listunknown):
112 """build a status with respect to another context"""
113 """build a status with respect to another context"""
113 # Load earliest manifest first for caching reasons. More specifically,
114 # Load earliest manifest first for caching reasons. More specifically,
114 # if you have revisions 1000 and 1001, 1001 is probably stored as a
115 # if you have revisions 1000 and 1001, 1001 is probably stored as a
115 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
116 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
116 # 1000 and cache it so that when you read 1001, we just need to apply a
117 # 1000 and cache it so that when you read 1001, we just need to apply a
117 # delta to what's in the cache. So that's one full reconstruction + one
118 # delta to what's in the cache. So that's one full reconstruction + one
118 # delta application.
119 # delta application.
119 if self.rev() is not None and self.rev() < other.rev():
120 if self.rev() is not None and self.rev() < other.rev():
120 self.manifest()
121 self.manifest()
121 mf1 = other._manifestmatches(match, s)
122 mf1 = other._manifestmatches(match, s)
122 mf2 = self._manifestmatches(match, s)
123 mf2 = self._manifestmatches(match, s)
123
124
124 modified, added = [], []
125 modified, added = [], []
125 removed = []
126 removed = []
126 clean = []
127 clean = []
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 deletedset = set(deleted)
129 deletedset = set(deleted)
129 d = mf1.diff(mf2, clean=listclean)
130 d = mf1.diff(mf2, clean=listclean)
130 for fn, value in d.iteritems():
131 for fn, value in d.iteritems():
131 if fn in deletedset:
132 if fn in deletedset:
132 continue
133 continue
133 if value is None:
134 if value is None:
134 clean.append(fn)
135 clean.append(fn)
135 continue
136 continue
136 (node1, flag1), (node2, flag2) = value
137 (node1, flag1), (node2, flag2) = value
137 if node1 is None:
138 if node1 is None:
138 added.append(fn)
139 added.append(fn)
139 elif node2 is None:
140 elif node2 is None:
140 removed.append(fn)
141 removed.append(fn)
141 elif flag1 != flag2:
142 elif flag1 != flag2:
142 modified.append(fn)
143 modified.append(fn)
143 elif node2 != newnodeid:
144 elif node2 not in wdirnodes:
144 # When comparing files between two commits, we save time by
145 # When comparing files between two commits, we save time by
145 # not comparing the file contents when the nodeids differ.
146 # not comparing the file contents when the nodeids differ.
146 # Note that this means we incorrectly report a reverted change
147 # Note that this means we incorrectly report a reverted change
147 # to a file as a modification.
148 # to a file as a modification.
148 modified.append(fn)
149 modified.append(fn)
149 elif self[fn].cmp(other[fn]):
150 elif self[fn].cmp(other[fn]):
150 modified.append(fn)
151 modified.append(fn)
151 else:
152 else:
152 clean.append(fn)
153 clean.append(fn)
153
154
154 if removed:
155 if removed:
155 # need to filter files if they are already reported as removed
156 # need to filter files if they are already reported as removed
156 unknown = [fn for fn in unknown if fn not in mf1]
157 unknown = [fn for fn in unknown if fn not in mf1]
157 ignored = [fn for fn in ignored if fn not in mf1]
158 ignored = [fn for fn in ignored if fn not in mf1]
158 # if they're deleted, don't report them as removed
159 # if they're deleted, don't report them as removed
159 removed = [fn for fn in removed if fn not in deletedset]
160 removed = [fn for fn in removed if fn not in deletedset]
160
161
161 return scmutil.status(modified, added, removed, deleted, unknown,
162 return scmutil.status(modified, added, removed, deleted, unknown,
162 ignored, clean)
163 ignored, clean)
163
164
164 @propertycache
165 @propertycache
165 def substate(self):
166 def substate(self):
166 return subrepo.state(self, self._repo.ui)
167 return subrepo.state(self, self._repo.ui)
167
168
168 def subrev(self, subpath):
169 def subrev(self, subpath):
169 return self.substate[subpath][1]
170 return self.substate[subpath][1]
170
171
171 def rev(self):
172 def rev(self):
172 return self._rev
173 return self._rev
173 def node(self):
174 def node(self):
174 return self._node
175 return self._node
175 def hex(self):
176 def hex(self):
176 return hex(self.node())
177 return hex(self.node())
177 def manifest(self):
178 def manifest(self):
178 return self._manifest
179 return self._manifest
179 def manifestctx(self):
180 def manifestctx(self):
180 return self._manifestctx
181 return self._manifestctx
181 def repo(self):
182 def repo(self):
182 return self._repo
183 return self._repo
183 def phasestr(self):
184 def phasestr(self):
184 return phases.phasenames[self.phase()]
185 return phases.phasenames[self.phase()]
185 def mutable(self):
186 def mutable(self):
186 return self.phase() > phases.public
187 return self.phase() > phases.public
187
188
188 def getfileset(self, expr):
189 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
190 return fileset.getfileset(self, expr)
190
191
191 def obsolete(self):
192 def obsolete(self):
192 """True if the changeset is obsolete"""
193 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
195
195 def extinct(self):
196 def extinct(self):
196 """True if the changeset is extinct"""
197 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
199
199 def unstable(self):
200 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
201 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
203
203 def bumped(self):
204 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
205 """True if the changeset try to be a successor of a public changeset
205
206
206 Only non-public and non-obsolete changesets may be bumped.
207 Only non-public and non-obsolete changesets may be bumped.
207 """
208 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
210
210 def divergent(self):
211 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
212 """Is a successors of a changeset with multiple possible successors set
212
213
213 Only non-public and non-obsolete changesets may be divergent.
214 Only non-public and non-obsolete changesets may be divergent.
214 """
215 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
217
217 def troubled(self):
218 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
219 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
220 return self.unstable() or self.bumped() or self.divergent()
220
221
221 def troubles(self):
222 def troubles(self):
222 """return the list of troubles affecting this changesets.
223 """return the list of troubles affecting this changesets.
223
224
224 Troubles are returned as strings. possible values are:
225 Troubles are returned as strings. possible values are:
225 - unstable,
226 - unstable,
226 - bumped,
227 - bumped,
227 - divergent.
228 - divergent.
228 """
229 """
229 troubles = []
230 troubles = []
230 if self.unstable():
231 if self.unstable():
231 troubles.append('unstable')
232 troubles.append('unstable')
232 if self.bumped():
233 if self.bumped():
233 troubles.append('bumped')
234 troubles.append('bumped')
234 if self.divergent():
235 if self.divergent():
235 troubles.append('divergent')
236 troubles.append('divergent')
236 return troubles
237 return troubles
237
238
238 def parents(self):
239 def parents(self):
239 """return contexts for each parent changeset"""
240 """return contexts for each parent changeset"""
240 return self._parents
241 return self._parents
241
242
242 def p1(self):
243 def p1(self):
243 return self._parents[0]
244 return self._parents[0]
244
245
245 def p2(self):
246 def p2(self):
246 parents = self._parents
247 parents = self._parents
247 if len(parents) == 2:
248 if len(parents) == 2:
248 return parents[1]
249 return parents[1]
249 return changectx(self._repo, nullrev)
250 return changectx(self._repo, nullrev)
250
251
251 def _fileinfo(self, path):
252 def _fileinfo(self, path):
252 if '_manifest' in self.__dict__:
253 if '_manifest' in self.__dict__:
253 try:
254 try:
254 return self._manifest[path], self._manifest.flags(path)
255 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
256 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
257 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
258 _('not found in manifest'))
258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
260 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
261 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
262 self._manifestdelta.flags(path))
262 mfl = self._repo.manifestlog
263 mfl = self._repo.manifestlog
263 try:
264 try:
264 node, flag = mfl[self._changeset.manifest].find(path)
265 node, flag = mfl[self._changeset.manifest].find(path)
265 except KeyError:
266 except KeyError:
266 raise error.ManifestLookupError(self._node, path,
267 raise error.ManifestLookupError(self._node, path,
267 _('not found in manifest'))
268 _('not found in manifest'))
268
269
269 return node, flag
270 return node, flag
270
271
271 def filenode(self, path):
272 def filenode(self, path):
272 return self._fileinfo(path)[0]
273 return self._fileinfo(path)[0]
273
274
274 def flags(self, path):
275 def flags(self, path):
275 try:
276 try:
276 return self._fileinfo(path)[1]
277 return self._fileinfo(path)[1]
277 except error.LookupError:
278 except error.LookupError:
278 return ''
279 return ''
279
280
280 def sub(self, path, allowcreate=True):
281 def sub(self, path, allowcreate=True):
281 '''return a subrepo for the stored revision of path, never wdir()'''
282 '''return a subrepo for the stored revision of path, never wdir()'''
282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283
284
284 def nullsub(self, path, pctx):
285 def nullsub(self, path, pctx):
285 return subrepo.nullsubrepo(self, path, pctx)
286 return subrepo.nullsubrepo(self, path, pctx)
286
287
287 def workingsub(self, path):
288 def workingsub(self, path):
288 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 context.
290 context.
290 '''
291 '''
291 return subrepo.subrepo(self, path, allowwdir=True)
292 return subrepo.subrepo(self, path, allowwdir=True)
292
293
293 def match(self, pats=[], include=None, exclude=None, default='glob',
294 def match(self, pats=[], include=None, exclude=None, default='glob',
294 listsubrepos=False, badfn=None):
295 listsubrepos=False, badfn=None):
295 r = self._repo
296 r = self._repo
296 return matchmod.match(r.root, r.getcwd(), pats,
297 return matchmod.match(r.root, r.getcwd(), pats,
297 include, exclude, default,
298 include, exclude, default,
298 auditor=r.nofsauditor, ctx=self,
299 auditor=r.nofsauditor, ctx=self,
299 listsubrepos=listsubrepos, badfn=badfn)
300 listsubrepos=listsubrepos, badfn=badfn)
300
301
301 def diff(self, ctx2=None, match=None, **opts):
302 def diff(self, ctx2=None, match=None, **opts):
302 """Returns a diff generator for the given contexts and matcher"""
303 """Returns a diff generator for the given contexts and matcher"""
303 if ctx2 is None:
304 if ctx2 is None:
304 ctx2 = self.p1()
305 ctx2 = self.p1()
305 if ctx2 is not None:
306 if ctx2 is not None:
306 ctx2 = self._repo[ctx2]
307 ctx2 = self._repo[ctx2]
307 diffopts = patch.diffopts(self._repo.ui, opts)
308 diffopts = patch.diffopts(self._repo.ui, opts)
308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309
310
310 def dirs(self):
311 def dirs(self):
311 return self._manifest.dirs()
312 return self._manifest.dirs()
312
313
313 def hasdir(self, dir):
314 def hasdir(self, dir):
314 return self._manifest.hasdir(dir)
315 return self._manifest.hasdir(dir)
315
316
316 def dirty(self, missing=False, merge=True, branch=True):
317 def dirty(self, missing=False, merge=True, branch=True):
317 return False
318 return False
318
319
319 def status(self, other=None, match=None, listignored=False,
320 def status(self, other=None, match=None, listignored=False,
320 listclean=False, listunknown=False, listsubrepos=False):
321 listclean=False, listunknown=False, listsubrepos=False):
321 """return status of files between two nodes or node and working
322 """return status of files between two nodes or node and working
322 directory.
323 directory.
323
324
324 If other is None, compare this node with working directory.
325 If other is None, compare this node with working directory.
325
326
326 returns (modified, added, removed, deleted, unknown, ignored, clean)
327 returns (modified, added, removed, deleted, unknown, ignored, clean)
327 """
328 """
328
329
329 ctx1 = self
330 ctx1 = self
330 ctx2 = self._repo[other]
331 ctx2 = self._repo[other]
331
332
332 # This next code block is, admittedly, fragile logic that tests for
333 # This next code block is, admittedly, fragile logic that tests for
333 # reversing the contexts and wouldn't need to exist if it weren't for
334 # reversing the contexts and wouldn't need to exist if it weren't for
334 # the fast (and common) code path of comparing the working directory
335 # the fast (and common) code path of comparing the working directory
335 # with its first parent.
336 # with its first parent.
336 #
337 #
337 # What we're aiming for here is the ability to call:
338 # What we're aiming for here is the ability to call:
338 #
339 #
339 # workingctx.status(parentctx)
340 # workingctx.status(parentctx)
340 #
341 #
341 # If we always built the manifest for each context and compared those,
342 # If we always built the manifest for each context and compared those,
342 # then we'd be done. But the special case of the above call means we
343 # then we'd be done. But the special case of the above call means we
343 # just copy the manifest of the parent.
344 # just copy the manifest of the parent.
344 reversed = False
345 reversed = False
345 if (not isinstance(ctx1, changectx)
346 if (not isinstance(ctx1, changectx)
346 and isinstance(ctx2, changectx)):
347 and isinstance(ctx2, changectx)):
347 reversed = True
348 reversed = True
348 ctx1, ctx2 = ctx2, ctx1
349 ctx1, ctx2 = ctx2, ctx1
349
350
350 match = ctx2._matchstatus(ctx1, match)
351 match = ctx2._matchstatus(ctx1, match)
351 r = scmutil.status([], [], [], [], [], [], [])
352 r = scmutil.status([], [], [], [], [], [], [])
352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 listunknown)
354 listunknown)
354
355
355 if reversed:
356 if reversed:
356 # Reverse added and removed. Clear deleted, unknown and ignored as
357 # Reverse added and removed. Clear deleted, unknown and ignored as
357 # these make no sense to reverse.
358 # these make no sense to reverse.
358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 r.clean)
360 r.clean)
360
361
361 if listsubrepos:
362 if listsubrepos:
362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 try:
364 try:
364 rev2 = ctx2.subrev(subpath)
365 rev2 = ctx2.subrev(subpath)
365 except KeyError:
366 except KeyError:
366 # A subrepo that existed in node1 was deleted between
367 # A subrepo that existed in node1 was deleted between
367 # node1 and node2 (inclusive). Thus, ctx2's substate
368 # node1 and node2 (inclusive). Thus, ctx2's substate
368 # won't contain that subpath. The best we can do ignore it.
369 # won't contain that subpath. The best we can do ignore it.
369 rev2 = None
370 rev2 = None
370 submatch = matchmod.subdirmatcher(subpath, match)
371 submatch = matchmod.subdirmatcher(subpath, match)
371 s = sub.status(rev2, match=submatch, ignored=listignored,
372 s = sub.status(rev2, match=submatch, ignored=listignored,
372 clean=listclean, unknown=listunknown,
373 clean=listclean, unknown=listunknown,
373 listsubrepos=True)
374 listsubrepos=True)
374 for rfiles, sfiles in zip(r, s):
375 for rfiles, sfiles in zip(r, s):
375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376
377
377 for l in r:
378 for l in r:
378 l.sort()
379 l.sort()
379
380
380 return r
381 return r
381
382
382
383
383 def makememctx(repo, parents, text, user, date, branch, files, store,
384 def makememctx(repo, parents, text, user, date, branch, files, store,
384 editor=None, extra=None):
385 editor=None, extra=None):
385 def getfilectx(repo, memctx, path):
386 def getfilectx(repo, memctx, path):
386 data, mode, copied = store.getfile(path)
387 data, mode, copied = store.getfile(path)
387 if data is None:
388 if data is None:
388 return None
389 return None
389 islink, isexec = mode
390 islink, isexec = mode
390 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
391 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
391 copied=copied, memctx=memctx)
392 copied=copied, memctx=memctx)
392 if extra is None:
393 if extra is None:
393 extra = {}
394 extra = {}
394 if branch:
395 if branch:
395 extra['branch'] = encoding.fromlocal(branch)
396 extra['branch'] = encoding.fromlocal(branch)
396 ctx = memctx(repo, parents, text, files, getfilectx, user,
397 ctx = memctx(repo, parents, text, files, getfilectx, user,
397 date, extra, editor)
398 date, extra, editor)
398 return ctx
399 return ctx
399
400
400 class changectx(basectx):
401 class changectx(basectx):
401 """A changecontext object makes access to data related to a particular
402 """A changecontext object makes access to data related to a particular
402 changeset convenient. It represents a read-only context already present in
403 changeset convenient. It represents a read-only context already present in
403 the repo."""
404 the repo."""
404 def __init__(self, repo, changeid=''):
405 def __init__(self, repo, changeid=''):
405 """changeid is a revision number, node, or tag"""
406 """changeid is a revision number, node, or tag"""
406
407
407 # since basectx.__new__ already took care of copying the object, we
408 # since basectx.__new__ already took care of copying the object, we
408 # don't need to do anything in __init__, so we just exit here
409 # don't need to do anything in __init__, so we just exit here
409 if isinstance(changeid, basectx):
410 if isinstance(changeid, basectx):
410 return
411 return
411
412
412 if changeid == '':
413 if changeid == '':
413 changeid = '.'
414 changeid = '.'
414 self._repo = repo
415 self._repo = repo
415
416
416 try:
417 try:
417 if isinstance(changeid, int):
418 if isinstance(changeid, int):
418 self._node = repo.changelog.node(changeid)
419 self._node = repo.changelog.node(changeid)
419 self._rev = changeid
420 self._rev = changeid
420 return
421 return
421 if isinstance(changeid, long):
422 if isinstance(changeid, long):
422 changeid = str(changeid)
423 changeid = str(changeid)
423 if changeid == 'null':
424 if changeid == 'null':
424 self._node = nullid
425 self._node = nullid
425 self._rev = nullrev
426 self._rev = nullrev
426 return
427 return
427 if changeid == 'tip':
428 if changeid == 'tip':
428 self._node = repo.changelog.tip()
429 self._node = repo.changelog.tip()
429 self._rev = repo.changelog.rev(self._node)
430 self._rev = repo.changelog.rev(self._node)
430 return
431 return
431 if changeid == '.' or changeid == repo.dirstate.p1():
432 if changeid == '.' or changeid == repo.dirstate.p1():
432 # this is a hack to delay/avoid loading obsmarkers
433 # this is a hack to delay/avoid loading obsmarkers
433 # when we know that '.' won't be hidden
434 # when we know that '.' won't be hidden
434 self._node = repo.dirstate.p1()
435 self._node = repo.dirstate.p1()
435 self._rev = repo.unfiltered().changelog.rev(self._node)
436 self._rev = repo.unfiltered().changelog.rev(self._node)
436 return
437 return
437 if len(changeid) == 20:
438 if len(changeid) == 20:
438 try:
439 try:
439 self._node = changeid
440 self._node = changeid
440 self._rev = repo.changelog.rev(changeid)
441 self._rev = repo.changelog.rev(changeid)
441 return
442 return
442 except error.FilteredRepoLookupError:
443 except error.FilteredRepoLookupError:
443 raise
444 raise
444 except LookupError:
445 except LookupError:
445 pass
446 pass
446
447
447 try:
448 try:
448 r = int(changeid)
449 r = int(changeid)
449 if str(r) != changeid:
450 if str(r) != changeid:
450 raise ValueError
451 raise ValueError
451 l = len(repo.changelog)
452 l = len(repo.changelog)
452 if r < 0:
453 if r < 0:
453 r += l
454 r += l
454 if r < 0 or r >= l:
455 if r < 0 or r >= l:
455 raise ValueError
456 raise ValueError
456 self._rev = r
457 self._rev = r
457 self._node = repo.changelog.node(r)
458 self._node = repo.changelog.node(r)
458 return
459 return
459 except error.FilteredIndexError:
460 except error.FilteredIndexError:
460 raise
461 raise
461 except (ValueError, OverflowError, IndexError):
462 except (ValueError, OverflowError, IndexError):
462 pass
463 pass
463
464
464 if len(changeid) == 40:
465 if len(changeid) == 40:
465 try:
466 try:
466 self._node = bin(changeid)
467 self._node = bin(changeid)
467 self._rev = repo.changelog.rev(self._node)
468 self._rev = repo.changelog.rev(self._node)
468 return
469 return
469 except error.FilteredLookupError:
470 except error.FilteredLookupError:
470 raise
471 raise
471 except (TypeError, LookupError):
472 except (TypeError, LookupError):
472 pass
473 pass
473
474
474 # lookup bookmarks through the name interface
475 # lookup bookmarks through the name interface
475 try:
476 try:
476 self._node = repo.names.singlenode(repo, changeid)
477 self._node = repo.names.singlenode(repo, changeid)
477 self._rev = repo.changelog.rev(self._node)
478 self._rev = repo.changelog.rev(self._node)
478 return
479 return
479 except KeyError:
480 except KeyError:
480 pass
481 pass
481 except error.FilteredRepoLookupError:
482 except error.FilteredRepoLookupError:
482 raise
483 raise
483 except error.RepoLookupError:
484 except error.RepoLookupError:
484 pass
485 pass
485
486
486 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 if self._node is not None:
488 if self._node is not None:
488 self._rev = repo.changelog.rev(self._node)
489 self._rev = repo.changelog.rev(self._node)
489 return
490 return
490
491
491 # lookup failed
492 # lookup failed
492 # check if it might have come from damaged dirstate
493 # check if it might have come from damaged dirstate
493 #
494 #
494 # XXX we could avoid the unfiltered if we had a recognizable
495 # XXX we could avoid the unfiltered if we had a recognizable
495 # exception for filtered changeset access
496 # exception for filtered changeset access
496 if changeid in repo.unfiltered().dirstate.parents():
497 if changeid in repo.unfiltered().dirstate.parents():
497 msg = _("working directory has unknown parent '%s'!")
498 msg = _("working directory has unknown parent '%s'!")
498 raise error.Abort(msg % short(changeid))
499 raise error.Abort(msg % short(changeid))
499 try:
500 try:
500 if len(changeid) == 20 and nonascii(changeid):
501 if len(changeid) == 20 and nonascii(changeid):
501 changeid = hex(changeid)
502 changeid = hex(changeid)
502 except TypeError:
503 except TypeError:
503 pass
504 pass
504 except (error.FilteredIndexError, error.FilteredLookupError,
505 except (error.FilteredIndexError, error.FilteredLookupError,
505 error.FilteredRepoLookupError):
506 error.FilteredRepoLookupError):
506 if repo.filtername.startswith('visible'):
507 if repo.filtername.startswith('visible'):
507 msg = _("hidden revision '%s'") % changeid
508 msg = _("hidden revision '%s'") % changeid
508 hint = _('use --hidden to access hidden revisions')
509 hint = _('use --hidden to access hidden revisions')
509 raise error.FilteredRepoLookupError(msg, hint=hint)
510 raise error.FilteredRepoLookupError(msg, hint=hint)
510 msg = _("filtered revision '%s' (not in '%s' subset)")
511 msg = _("filtered revision '%s' (not in '%s' subset)")
511 msg %= (changeid, repo.filtername)
512 msg %= (changeid, repo.filtername)
512 raise error.FilteredRepoLookupError(msg)
513 raise error.FilteredRepoLookupError(msg)
513 except IndexError:
514 except IndexError:
514 pass
515 pass
515 raise error.RepoLookupError(
516 raise error.RepoLookupError(
516 _("unknown revision '%s'") % changeid)
517 _("unknown revision '%s'") % changeid)
517
518
518 def __hash__(self):
519 def __hash__(self):
519 try:
520 try:
520 return hash(self._rev)
521 return hash(self._rev)
521 except AttributeError:
522 except AttributeError:
522 return id(self)
523 return id(self)
523
524
524 def __nonzero__(self):
525 def __nonzero__(self):
525 return self._rev != nullrev
526 return self._rev != nullrev
526
527
527 @propertycache
528 @propertycache
528 def _changeset(self):
529 def _changeset(self):
529 return self._repo.changelog.changelogrevision(self.rev())
530 return self._repo.changelog.changelogrevision(self.rev())
530
531
531 @propertycache
532 @propertycache
532 def _manifest(self):
533 def _manifest(self):
533 return self._manifestctx.read()
534 return self._manifestctx.read()
534
535
535 @propertycache
536 @propertycache
536 def _manifestctx(self):
537 def _manifestctx(self):
537 return self._repo.manifestlog[self._changeset.manifest]
538 return self._repo.manifestlog[self._changeset.manifest]
538
539
539 @propertycache
540 @propertycache
540 def _manifestdelta(self):
541 def _manifestdelta(self):
541 return self._manifestctx.readdelta()
542 return self._manifestctx.readdelta()
542
543
543 @propertycache
544 @propertycache
544 def _parents(self):
545 def _parents(self):
545 repo = self._repo
546 repo = self._repo
546 p1, p2 = repo.changelog.parentrevs(self._rev)
547 p1, p2 = repo.changelog.parentrevs(self._rev)
547 if p2 == nullrev:
548 if p2 == nullrev:
548 return [changectx(repo, p1)]
549 return [changectx(repo, p1)]
549 return [changectx(repo, p1), changectx(repo, p2)]
550 return [changectx(repo, p1), changectx(repo, p2)]
550
551
551 def changeset(self):
552 def changeset(self):
552 c = self._changeset
553 c = self._changeset
553 return (
554 return (
554 c.manifest,
555 c.manifest,
555 c.user,
556 c.user,
556 c.date,
557 c.date,
557 c.files,
558 c.files,
558 c.description,
559 c.description,
559 c.extra,
560 c.extra,
560 )
561 )
561 def manifestnode(self):
562 def manifestnode(self):
562 return self._changeset.manifest
563 return self._changeset.manifest
563
564
564 def user(self):
565 def user(self):
565 return self._changeset.user
566 return self._changeset.user
566 def date(self):
567 def date(self):
567 return self._changeset.date
568 return self._changeset.date
568 def files(self):
569 def files(self):
569 return self._changeset.files
570 return self._changeset.files
570 def description(self):
571 def description(self):
571 return self._changeset.description
572 return self._changeset.description
572 def branch(self):
573 def branch(self):
573 return encoding.tolocal(self._changeset.extra.get("branch"))
574 return encoding.tolocal(self._changeset.extra.get("branch"))
574 def closesbranch(self):
575 def closesbranch(self):
575 return 'close' in self._changeset.extra
576 return 'close' in self._changeset.extra
576 def extra(self):
577 def extra(self):
577 return self._changeset.extra
578 return self._changeset.extra
578 def tags(self):
579 def tags(self):
579 return self._repo.nodetags(self._node)
580 return self._repo.nodetags(self._node)
580 def bookmarks(self):
581 def bookmarks(self):
581 return self._repo.nodebookmarks(self._node)
582 return self._repo.nodebookmarks(self._node)
582 def phase(self):
583 def phase(self):
583 return self._repo._phasecache.phase(self._repo, self._rev)
584 return self._repo._phasecache.phase(self._repo, self._rev)
584 def hidden(self):
585 def hidden(self):
585 return self._rev in repoview.filterrevs(self._repo, 'visible')
586 return self._rev in repoview.filterrevs(self._repo, 'visible')
586
587
587 def children(self):
588 def children(self):
588 """return contexts for each child changeset"""
589 """return contexts for each child changeset"""
589 c = self._repo.changelog.children(self._node)
590 c = self._repo.changelog.children(self._node)
590 return [changectx(self._repo, x) for x in c]
591 return [changectx(self._repo, x) for x in c]
591
592
592 def ancestors(self):
593 def ancestors(self):
593 for a in self._repo.changelog.ancestors([self._rev]):
594 for a in self._repo.changelog.ancestors([self._rev]):
594 yield changectx(self._repo, a)
595 yield changectx(self._repo, a)
595
596
596 def descendants(self):
597 def descendants(self):
597 for d in self._repo.changelog.descendants([self._rev]):
598 for d in self._repo.changelog.descendants([self._rev]):
598 yield changectx(self._repo, d)
599 yield changectx(self._repo, d)
599
600
600 def filectx(self, path, fileid=None, filelog=None):
601 def filectx(self, path, fileid=None, filelog=None):
601 """get a file context from this changeset"""
602 """get a file context from this changeset"""
602 if fileid is None:
603 if fileid is None:
603 fileid = self.filenode(path)
604 fileid = self.filenode(path)
604 return filectx(self._repo, path, fileid=fileid,
605 return filectx(self._repo, path, fileid=fileid,
605 changectx=self, filelog=filelog)
606 changectx=self, filelog=filelog)
606
607
607 def ancestor(self, c2, warn=False):
608 def ancestor(self, c2, warn=False):
608 """return the "best" ancestor context of self and c2
609 """return the "best" ancestor context of self and c2
609
610
610 If there are multiple candidates, it will show a message and check
611 If there are multiple candidates, it will show a message and check
611 merge.preferancestor configuration before falling back to the
612 merge.preferancestor configuration before falling back to the
612 revlog ancestor."""
613 revlog ancestor."""
613 # deal with workingctxs
614 # deal with workingctxs
614 n2 = c2._node
615 n2 = c2._node
615 if n2 is None:
616 if n2 is None:
616 n2 = c2._parents[0]._node
617 n2 = c2._parents[0]._node
617 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
618 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
618 if not cahs:
619 if not cahs:
619 anc = nullid
620 anc = nullid
620 elif len(cahs) == 1:
621 elif len(cahs) == 1:
621 anc = cahs[0]
622 anc = cahs[0]
622 else:
623 else:
623 # experimental config: merge.preferancestor
624 # experimental config: merge.preferancestor
624 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
625 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
625 try:
626 try:
626 ctx = changectx(self._repo, r)
627 ctx = changectx(self._repo, r)
627 except error.RepoLookupError:
628 except error.RepoLookupError:
628 continue
629 continue
629 anc = ctx.node()
630 anc = ctx.node()
630 if anc in cahs:
631 if anc in cahs:
631 break
632 break
632 else:
633 else:
633 anc = self._repo.changelog.ancestor(self._node, n2)
634 anc = self._repo.changelog.ancestor(self._node, n2)
634 if warn:
635 if warn:
635 self._repo.ui.status(
636 self._repo.ui.status(
636 (_("note: using %s as ancestor of %s and %s\n") %
637 (_("note: using %s as ancestor of %s and %s\n") %
637 (short(anc), short(self._node), short(n2))) +
638 (short(anc), short(self._node), short(n2))) +
638 ''.join(_(" alternatively, use --config "
639 ''.join(_(" alternatively, use --config "
639 "merge.preferancestor=%s\n") %
640 "merge.preferancestor=%s\n") %
640 short(n) for n in sorted(cahs) if n != anc))
641 short(n) for n in sorted(cahs) if n != anc))
641 return changectx(self._repo, anc)
642 return changectx(self._repo, anc)
642
643
643 def descendant(self, other):
644 def descendant(self, other):
644 """True if other is descendant of this changeset"""
645 """True if other is descendant of this changeset"""
645 return self._repo.changelog.descendant(self._rev, other._rev)
646 return self._repo.changelog.descendant(self._rev, other._rev)
646
647
647 def walk(self, match):
648 def walk(self, match):
648 '''Generates matching file names.'''
649 '''Generates matching file names.'''
649
650
650 # Wrap match.bad method to have message with nodeid
651 # Wrap match.bad method to have message with nodeid
651 def bad(fn, msg):
652 def bad(fn, msg):
652 # The manifest doesn't know about subrepos, so don't complain about
653 # The manifest doesn't know about subrepos, so don't complain about
653 # paths into valid subrepos.
654 # paths into valid subrepos.
654 if any(fn == s or fn.startswith(s + '/')
655 if any(fn == s or fn.startswith(s + '/')
655 for s in self.substate):
656 for s in self.substate):
656 return
657 return
657 match.bad(fn, _('no such file in rev %s') % self)
658 match.bad(fn, _('no such file in rev %s') % self)
658
659
659 m = matchmod.badmatch(match, bad)
660 m = matchmod.badmatch(match, bad)
660 return self._manifest.walk(m)
661 return self._manifest.walk(m)
661
662
662 def matches(self, match):
663 def matches(self, match):
663 return self.walk(match)
664 return self.walk(match)
664
665
665 class basefilectx(object):
666 class basefilectx(object):
666 """A filecontext object represents the common logic for its children:
667 """A filecontext object represents the common logic for its children:
667 filectx: read-only access to a filerevision that is already present
668 filectx: read-only access to a filerevision that is already present
668 in the repo,
669 in the repo,
669 workingfilectx: a filecontext that represents files from the working
670 workingfilectx: a filecontext that represents files from the working
670 directory,
671 directory,
671 memfilectx: a filecontext that represents files in-memory."""
672 memfilectx: a filecontext that represents files in-memory."""
672 def __new__(cls, repo, path, *args, **kwargs):
673 def __new__(cls, repo, path, *args, **kwargs):
673 return super(basefilectx, cls).__new__(cls)
674 return super(basefilectx, cls).__new__(cls)
674
675
675 @propertycache
676 @propertycache
676 def _filelog(self):
677 def _filelog(self):
677 return self._repo.file(self._path)
678 return self._repo.file(self._path)
678
679
679 @propertycache
680 @propertycache
680 def _changeid(self):
681 def _changeid(self):
681 if '_changeid' in self.__dict__:
682 if '_changeid' in self.__dict__:
682 return self._changeid
683 return self._changeid
683 elif '_changectx' in self.__dict__:
684 elif '_changectx' in self.__dict__:
684 return self._changectx.rev()
685 return self._changectx.rev()
685 elif '_descendantrev' in self.__dict__:
686 elif '_descendantrev' in self.__dict__:
686 # this file context was created from a revision with a known
687 # this file context was created from a revision with a known
687 # descendant, we can (lazily) correct for linkrev aliases
688 # descendant, we can (lazily) correct for linkrev aliases
688 return self._adjustlinkrev(self._descendantrev)
689 return self._adjustlinkrev(self._descendantrev)
689 else:
690 else:
690 return self._filelog.linkrev(self._filerev)
691 return self._filelog.linkrev(self._filerev)
691
692
692 @propertycache
693 @propertycache
693 def _filenode(self):
694 def _filenode(self):
694 if '_fileid' in self.__dict__:
695 if '_fileid' in self.__dict__:
695 return self._filelog.lookup(self._fileid)
696 return self._filelog.lookup(self._fileid)
696 else:
697 else:
697 return self._changectx.filenode(self._path)
698 return self._changectx.filenode(self._path)
698
699
699 @propertycache
700 @propertycache
700 def _filerev(self):
701 def _filerev(self):
701 return self._filelog.rev(self._filenode)
702 return self._filelog.rev(self._filenode)
702
703
703 @propertycache
704 @propertycache
704 def _repopath(self):
705 def _repopath(self):
705 return self._path
706 return self._path
706
707
707 def __nonzero__(self):
708 def __nonzero__(self):
708 try:
709 try:
709 self._filenode
710 self._filenode
710 return True
711 return True
711 except error.LookupError:
712 except error.LookupError:
712 # file is missing
713 # file is missing
713 return False
714 return False
714
715
715 def __str__(self):
716 def __str__(self):
716 try:
717 try:
717 return "%s@%s" % (self.path(), self._changectx)
718 return "%s@%s" % (self.path(), self._changectx)
718 except error.LookupError:
719 except error.LookupError:
719 return "%s@???" % self.path()
720 return "%s@???" % self.path()
720
721
721 def __repr__(self):
722 def __repr__(self):
722 return "<%s %s>" % (type(self).__name__, str(self))
723 return "<%s %s>" % (type(self).__name__, str(self))
723
724
724 def __hash__(self):
725 def __hash__(self):
725 try:
726 try:
726 return hash((self._path, self._filenode))
727 return hash((self._path, self._filenode))
727 except AttributeError:
728 except AttributeError:
728 return id(self)
729 return id(self)
729
730
730 def __eq__(self, other):
731 def __eq__(self, other):
731 try:
732 try:
732 return (type(self) == type(other) and self._path == other._path
733 return (type(self) == type(other) and self._path == other._path
733 and self._filenode == other._filenode)
734 and self._filenode == other._filenode)
734 except AttributeError:
735 except AttributeError:
735 return False
736 return False
736
737
737 def __ne__(self, other):
738 def __ne__(self, other):
738 return not (self == other)
739 return not (self == other)
739
740
740 def filerev(self):
741 def filerev(self):
741 return self._filerev
742 return self._filerev
742 def filenode(self):
743 def filenode(self):
743 return self._filenode
744 return self._filenode
744 def flags(self):
745 def flags(self):
745 return self._changectx.flags(self._path)
746 return self._changectx.flags(self._path)
746 def filelog(self):
747 def filelog(self):
747 return self._filelog
748 return self._filelog
748 def rev(self):
749 def rev(self):
749 return self._changeid
750 return self._changeid
750 def linkrev(self):
751 def linkrev(self):
751 return self._filelog.linkrev(self._filerev)
752 return self._filelog.linkrev(self._filerev)
752 def node(self):
753 def node(self):
753 return self._changectx.node()
754 return self._changectx.node()
754 def hex(self):
755 def hex(self):
755 return self._changectx.hex()
756 return self._changectx.hex()
756 def user(self):
757 def user(self):
757 return self._changectx.user()
758 return self._changectx.user()
758 def date(self):
759 def date(self):
759 return self._changectx.date()
760 return self._changectx.date()
760 def files(self):
761 def files(self):
761 return self._changectx.files()
762 return self._changectx.files()
762 def description(self):
763 def description(self):
763 return self._changectx.description()
764 return self._changectx.description()
764 def branch(self):
765 def branch(self):
765 return self._changectx.branch()
766 return self._changectx.branch()
766 def extra(self):
767 def extra(self):
767 return self._changectx.extra()
768 return self._changectx.extra()
768 def phase(self):
769 def phase(self):
769 return self._changectx.phase()
770 return self._changectx.phase()
770 def phasestr(self):
771 def phasestr(self):
771 return self._changectx.phasestr()
772 return self._changectx.phasestr()
772 def manifest(self):
773 def manifest(self):
773 return self._changectx.manifest()
774 return self._changectx.manifest()
774 def changectx(self):
775 def changectx(self):
775 return self._changectx
776 return self._changectx
776 def repo(self):
777 def repo(self):
777 return self._repo
778 return self._repo
778
779
779 def path(self):
780 def path(self):
780 return self._path
781 return self._path
781
782
782 def isbinary(self):
783 def isbinary(self):
783 try:
784 try:
784 return util.binary(self.data())
785 return util.binary(self.data())
785 except IOError:
786 except IOError:
786 return False
787 return False
787 def isexec(self):
788 def isexec(self):
788 return 'x' in self.flags()
789 return 'x' in self.flags()
789 def islink(self):
790 def islink(self):
790 return 'l' in self.flags()
791 return 'l' in self.flags()
791
792
792 def isabsent(self):
793 def isabsent(self):
793 """whether this filectx represents a file not in self._changectx
794 """whether this filectx represents a file not in self._changectx
794
795
795 This is mainly for merge code to detect change/delete conflicts. This is
796 This is mainly for merge code to detect change/delete conflicts. This is
796 expected to be True for all subclasses of basectx."""
797 expected to be True for all subclasses of basectx."""
797 return False
798 return False
798
799
799 _customcmp = False
800 _customcmp = False
800 def cmp(self, fctx):
801 def cmp(self, fctx):
801 """compare with other file context
802 """compare with other file context
802
803
803 returns True if different than fctx.
804 returns True if different than fctx.
804 """
805 """
805 if fctx._customcmp:
806 if fctx._customcmp:
806 return fctx.cmp(self)
807 return fctx.cmp(self)
807
808
808 if (fctx._filenode is None
809 if (fctx._filenode is None
809 and (self._repo._encodefilterpats
810 and (self._repo._encodefilterpats
810 # if file data starts with '\1\n', empty metadata block is
811 # if file data starts with '\1\n', empty metadata block is
811 # prepended, which adds 4 bytes to filelog.size().
812 # prepended, which adds 4 bytes to filelog.size().
812 or self.size() - 4 == fctx.size())
813 or self.size() - 4 == fctx.size())
813 or self.size() == fctx.size()):
814 or self.size() == fctx.size()):
814 return self._filelog.cmp(self._filenode, fctx.data())
815 return self._filelog.cmp(self._filenode, fctx.data())
815
816
816 return True
817 return True
817
818
818 def _adjustlinkrev(self, srcrev, inclusive=False):
819 def _adjustlinkrev(self, srcrev, inclusive=False):
819 """return the first ancestor of <srcrev> introducing <fnode>
820 """return the first ancestor of <srcrev> introducing <fnode>
820
821
821 If the linkrev of the file revision does not point to an ancestor of
822 If the linkrev of the file revision does not point to an ancestor of
822 srcrev, we'll walk down the ancestors until we find one introducing
823 srcrev, we'll walk down the ancestors until we find one introducing
823 this file revision.
824 this file revision.
824
825
825 :srcrev: the changeset revision we search ancestors from
826 :srcrev: the changeset revision we search ancestors from
826 :inclusive: if true, the src revision will also be checked
827 :inclusive: if true, the src revision will also be checked
827 """
828 """
828 repo = self._repo
829 repo = self._repo
829 cl = repo.unfiltered().changelog
830 cl = repo.unfiltered().changelog
830 mfl = repo.manifestlog
831 mfl = repo.manifestlog
831 # fetch the linkrev
832 # fetch the linkrev
832 lkr = self.linkrev()
833 lkr = self.linkrev()
833 # hack to reuse ancestor computation when searching for renames
834 # hack to reuse ancestor computation when searching for renames
834 memberanc = getattr(self, '_ancestrycontext', None)
835 memberanc = getattr(self, '_ancestrycontext', None)
835 iteranc = None
836 iteranc = None
836 if srcrev is None:
837 if srcrev is None:
837 # wctx case, used by workingfilectx during mergecopy
838 # wctx case, used by workingfilectx during mergecopy
838 revs = [p.rev() for p in self._repo[None].parents()]
839 revs = [p.rev() for p in self._repo[None].parents()]
839 inclusive = True # we skipped the real (revless) source
840 inclusive = True # we skipped the real (revless) source
840 else:
841 else:
841 revs = [srcrev]
842 revs = [srcrev]
842 if memberanc is None:
843 if memberanc is None:
843 memberanc = iteranc = cl.ancestors(revs, lkr,
844 memberanc = iteranc = cl.ancestors(revs, lkr,
844 inclusive=inclusive)
845 inclusive=inclusive)
845 # check if this linkrev is an ancestor of srcrev
846 # check if this linkrev is an ancestor of srcrev
846 if lkr not in memberanc:
847 if lkr not in memberanc:
847 if iteranc is None:
848 if iteranc is None:
848 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
849 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
849 fnode = self._filenode
850 fnode = self._filenode
850 path = self._path
851 path = self._path
851 for a in iteranc:
852 for a in iteranc:
852 ac = cl.read(a) # get changeset data (we avoid object creation)
853 ac = cl.read(a) # get changeset data (we avoid object creation)
853 if path in ac[3]: # checking the 'files' field.
854 if path in ac[3]: # checking the 'files' field.
854 # The file has been touched, check if the content is
855 # The file has been touched, check if the content is
855 # similar to the one we search for.
856 # similar to the one we search for.
856 if fnode == mfl[ac[0]].readfast().get(path):
857 if fnode == mfl[ac[0]].readfast().get(path):
857 return a
858 return a
858 # In theory, we should never get out of that loop without a result.
859 # In theory, we should never get out of that loop without a result.
859 # But if manifest uses a buggy file revision (not children of the
860 # But if manifest uses a buggy file revision (not children of the
860 # one it replaces) we could. Such a buggy situation will likely
861 # one it replaces) we could. Such a buggy situation will likely
861 # result is crash somewhere else at to some point.
862 # result is crash somewhere else at to some point.
862 return lkr
863 return lkr
863
864
864 def introrev(self):
865 def introrev(self):
865 """return the rev of the changeset which introduced this file revision
866 """return the rev of the changeset which introduced this file revision
866
867
867 This method is different from linkrev because it take into account the
868 This method is different from linkrev because it take into account the
868 changeset the filectx was created from. It ensures the returned
869 changeset the filectx was created from. It ensures the returned
869 revision is one of its ancestors. This prevents bugs from
870 revision is one of its ancestors. This prevents bugs from
870 'linkrev-shadowing' when a file revision is used by multiple
871 'linkrev-shadowing' when a file revision is used by multiple
871 changesets.
872 changesets.
872 """
873 """
873 lkr = self.linkrev()
874 lkr = self.linkrev()
874 attrs = vars(self)
875 attrs = vars(self)
875 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
876 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
876 if noctx or self.rev() == lkr:
877 if noctx or self.rev() == lkr:
877 return self.linkrev()
878 return self.linkrev()
878 return self._adjustlinkrev(self.rev(), inclusive=True)
879 return self._adjustlinkrev(self.rev(), inclusive=True)
879
880
880 def _parentfilectx(self, path, fileid, filelog):
881 def _parentfilectx(self, path, fileid, filelog):
881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 # If self is associated with a changeset (probably explicitly
885 # If self is associated with a changeset (probably explicitly
885 # fed), ensure the created filectx is associated with a
886 # fed), ensure the created filectx is associated with a
886 # changeset that is an ancestor of self.changectx.
887 # changeset that is an ancestor of self.changectx.
887 # This lets us later use _adjustlinkrev to get a correct link.
888 # This lets us later use _adjustlinkrev to get a correct link.
888 fctx._descendantrev = self.rev()
889 fctx._descendantrev = self.rev()
889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 elif '_descendantrev' in vars(self):
891 elif '_descendantrev' in vars(self):
891 # Otherwise propagate _descendantrev if we have one associated.
892 # Otherwise propagate _descendantrev if we have one associated.
892 fctx._descendantrev = self._descendantrev
893 fctx._descendantrev = self._descendantrev
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 return fctx
895 return fctx
895
896
896 def parents(self):
897 def parents(self):
897 _path = self._path
898 _path = self._path
898 fl = self._filelog
899 fl = self._filelog
899 parents = self._filelog.parents(self._filenode)
900 parents = self._filelog.parents(self._filenode)
900 pl = [(_path, node, fl) for node in parents if node != nullid]
901 pl = [(_path, node, fl) for node in parents if node != nullid]
901
902
902 r = fl.renamed(self._filenode)
903 r = fl.renamed(self._filenode)
903 if r:
904 if r:
904 # - In the simple rename case, both parent are nullid, pl is empty.
905 # - In the simple rename case, both parent are nullid, pl is empty.
905 # - In case of merge, only one of the parent is null id and should
906 # - In case of merge, only one of the parent is null id and should
906 # be replaced with the rename information. This parent is -always-
907 # be replaced with the rename information. This parent is -always-
907 # the first one.
908 # the first one.
908 #
909 #
909 # As null id have always been filtered out in the previous list
910 # As null id have always been filtered out in the previous list
910 # comprehension, inserting to 0 will always result in "replacing
911 # comprehension, inserting to 0 will always result in "replacing
911 # first nullid parent with rename information.
912 # first nullid parent with rename information.
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913
914
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915
916
    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=False, diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                for p in pl:
                    curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))

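    # Editor's note (hedged sketch, names are illustrative): annotate()
    # produces one ((filectx, number), line) pair per line of the file, so a
    # typical caller unpacks it along these lines:
    #
    #   for (fctx, lineno), line in repo['tip']['a'].annotate(linenumber=True):
    #       print fctx.rev(), lineno, line,
    #
    # With linenumber=False the second element of each key is simply False.
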
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when a solution to the linkrev issues is on the
            # table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

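# Editor's note (illustrative, not part of the original module): a filectx
# needs at least one of changeid, fileid or changectx, which the assertion in
# __init__ above enforces. Two common, roughly equivalent spellings:
#
#   fctx = repo['default']['path/to/file']              # via a changectx
#   fctx = filectx(repo, 'path/to/file', changeid='default')
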
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    diffinrange = any(stype == '!' for _, stype in filteredblocks)
    return diffinrange, linerange1

def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2

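# Editor's note (illustrative sketch): blockancestors() walks from newest to
# oldest linkrev, yielding a (filectx, (fromline, toline)) pair for the
# origin revision and for each ancestor that touches the block, e.g.:
#
#   fctx = repo['tip']['path/to/file']
#   for c, (first, last) in blockancestors(fctx, 10, 20):
#       print c.rev(), first, last
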
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

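    # Editor's note (worked example for the merge branch of _buildflagfunc
    # above; the flag values are hypothetical): with fl1 = 'x', fl2 = '' and
    # fla = '', we have fl1 != fl2 and fl1 != fla, but fl2 == fla, so the
    # function returns fl1 ('x') -- the side that changed the flag wins. A
    # genuine three-way conflict falls through to '' and is punted on.
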
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but we append an extra
        letter when a file is modified. Modified files get an extra 'm' while
        added files get an extra 'a'. This is used by manifest merge to see
        that files are different and by the update logic to avoid deleting
        newly added files.
        """
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, self._status.added),
                     (modifiednodeid, self._status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

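    # Editor's note (hedged usage sketch): the working context is usually
    # obtained as repo[None], so a caller bailing out on local changes might
    # do something like:
    #
    #   if repo[None].dirty(missing=True):
    #       raise error.Abort(_('uncommitted changes'))
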
    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=[], include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        if not util.fscasesensitive(r.root):
            return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                           exclude, default, r.auditor, self,
                                           listsubrepos=listsubrepos,
                                           badfn=badfn)
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = newnodeid
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

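# Illustrative sketch (not part of the original module): a partial-commit
# code path can build a workingcommitctx from just the changes being
# committed, so status queries on the context only report those files.
# The file names below are hypothetical, and modified() is assumed to be
# inherited from committablectx.
#
#   changes = scmutil.status(['modified.txt'], ['added.txt'], [], [],
#                            [], [], [])
#   wctx = workingcommitctx(repo, changes, text='partial commit')
#   wctx.modified()   # -> ['modified.txt']
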
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

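# Illustrative sketch (not part of the original module): memctx already wraps
# any callable filectxfn with makecachingfilectxfn, but the effect can be seen
# directly. The inner function and its expensive step are hypothetical.
#
#   def slowfilectx(repo, memctx, path):
#       data = regenerate(path)   # hypothetical expensive computation
#       return memfilectx(repo, path, data, memctx=memctx)
#
#   cachedfn = makecachingfilectxfn(slowfilectx)
#   # cachedfn(repo, memctx, 'a.txt') calls slowfilectx once per path;
#   # later lookups of 'a.txt' return the cached memfilectx.
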
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

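    # Illustrative sketch (not part of the original module): building a
    # one-file in-memory commit on top of the working directory parent.
    # The path, content and user below are hypothetical.
    #
    #   def getfilectx(repo, memctx, path):
    #       return memfilectx(repo, path, 'new content\n', memctx=memctx)
    #
    #   mctx = memctx(repo, (repo['.'].node(), None), 'example commit message',
    #                 ['a.txt'], getfilectx,
    #                 user='someone <someone@example.com>')
    #   newnode = mctx.commit()
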
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if filectxfn is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized from a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data

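# Illustrative sketch (not part of the original module): a memfilectx that
# records a copy from another path, as consumed by a memctx filectxfn. The
# file names and the enclosing mctx are hypothetical.
#
#   fctx = memfilectx(repo, 'renamed.txt', 'content\n',
#                     copied='original.txt', memctx=mctx)
#   fctx.renamed()   # -> ('original.txt', nullid)
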
class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we are reusing, 'parents' is a sequence of two parent revision identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to the current date, extra is a dictionary of
    metadata or is left empty.
    """
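    # Illustrative sketch (not part of the original module): rewriting only
    # the metadata of the working directory parent while reusing its
    # manifest. The new user value is hypothetical.
    #
    #   old = repo['.']
    #   mctx = metadataonlyctx(repo, old,
    #                          (old.p1().node(), old.p2().node()),
    #                          old.description(),
    #                          user='new <new@example.com>',
    #                          date=old.date(), extra=old.extra())
    #   newnode = mctx.commit()
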
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1.node() != nullid and p1.manifestctx().node() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2.node() != nullid and p2.manifestctx().node() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the
        ``originalctx`` and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized from a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])