context: make sure __str__ works, also when there is no _changectx...
Mads Kiilerich
r30270:e25ce44f default
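Summary of the change below: basefilectx.__str__ now wraps the "%s@%s" formatting in a try/except, so building the string no longer requires self._changectx to be resolvable; when that lookup fails, it falls back to "path@???". A minimal sketch of the fallback pattern, using a hypothetical stand-in class (fakefilectx) rather than the real filectx, with the failing lookup simulated:

# Sketch only: a stand-in mimicking the new __str__ fallback; the real
# basefilectx resolves _changectx lazily, and that resolution can fail.
class fakefilectx(object):
    def __init__(self, path):
        self._path = path

    def path(self):
        return self._path

    @property
    def _changectx(self):
        # simulate a failing lookup (the real code catches error.LookupError here)
        raise LookupError('no changeset associated with this file context')

    def __str__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except LookupError:
            return "%s@???" % self.path()

print(fakefilectx('a/file.txt'))  # -> "a/file.txt@???" instead of an exception

With the previous one-line __str__, the same failure would escape from str() and, since __repr__ delegates to str(self), from repr() as well, which is particularly unhelpful when the string is being built for an error or debug message.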
@@ -1,1984 +1,1987 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand in for new files in some uses of
42 # Phony node value to stand in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
49 class basectx(object):
49 class basectx(object):
50 """A basectx object represents the common logic for its children:
50 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
51 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
52 workingctx: a context that represents the working directory and can
53 be committed,
53 be committed,
54 memctx: a context that represents changes in-memory and can also
54 memctx: a context that represents changes in-memory and can also
55 be committed."""
55 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
57 if isinstance(changeid, basectx):
58 return changeid
58 return changeid
59
59
60 o = super(basectx, cls).__new__(cls)
60 o = super(basectx, cls).__new__(cls)
61
61
62 o._repo = repo
62 o._repo = repo
63 o._rev = nullrev
63 o._rev = nullrev
64 o._node = nullid
64 o._node = nullid
65
65
66 return o
66 return o
67
67
68 def __str__(self):
68 def __str__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 def __int__(self):
71 def __int__(self):
72 return self.rev()
72 return self.rev()
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _manifestmatches(self, match, s):
95 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
96 """generate a new manifest filtered by the match argument
97
97
98 This method is for internal use only and mainly exists to provide an
98 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
99 object oriented way for other contexts to customize the manifest
100 generation.
100 generation.
101 """
101 """
102 return self.manifest().matches(match)
102 return self.manifest().matches(match)
103
103
104 def _matchstatus(self, other, match):
104 def _matchstatus(self, other, match):
105 """return match.always if match is none
105 """return match.always if match is none
106
106
107 This internal method provides a way for child objects to override the
107 This internal method provides a way for child objects to override the
108 match operator.
108 match operator.
109 """
109 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
111
112 def _buildstatus(self, other, s, match, listignored, listclean,
112 def _buildstatus(self, other, s, match, listignored, listclean,
113 listunknown):
113 listunknown):
114 """build a status with respect to another context"""
114 """build a status with respect to another context"""
115 # Load earliest manifest first for caching reasons. More specifically,
115 # Load earliest manifest first for caching reasons. More specifically,
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # 1000 and cache it so that when you read 1001, we just need to apply a
118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # delta to what's in the cache. So that's one full reconstruction + one
119 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta application.
120 # delta application.
121 if self.rev() is not None and self.rev() < other.rev():
121 if self.rev() is not None and self.rev() < other.rev():
122 self.manifest()
122 self.manifest()
123 mf1 = other._manifestmatches(match, s)
123 mf1 = other._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
125
125
126 modified, added = [], []
126 modified, added = [], []
127 removed = []
127 removed = []
128 clean = []
128 clean = []
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deletedset = set(deleted)
130 deletedset = set(deleted)
131 d = mf1.diff(mf2, clean=listclean)
131 d = mf1.diff(mf2, clean=listclean)
132 for fn, value in d.iteritems():
132 for fn, value in d.iteritems():
133 if fn in deletedset:
133 if fn in deletedset:
134 continue
134 continue
135 if value is None:
135 if value is None:
136 clean.append(fn)
136 clean.append(fn)
137 continue
137 continue
138 (node1, flag1), (node2, flag2) = value
138 (node1, flag1), (node2, flag2) = value
139 if node1 is None:
139 if node1 is None:
140 added.append(fn)
140 added.append(fn)
141 elif node2 is None:
141 elif node2 is None:
142 removed.append(fn)
142 removed.append(fn)
143 elif flag1 != flag2:
143 elif flag1 != flag2:
144 modified.append(fn)
144 modified.append(fn)
145 elif node2 != _newnode:
145 elif node2 != _newnode:
146 # When comparing files between two commits, we save time by
146 # When comparing files between two commits, we save time by
147 # not comparing the file contents when the nodeids differ.
147 # not comparing the file contents when the nodeids differ.
148 # Note that this means we incorrectly report a reverted change
148 # Note that this means we incorrectly report a reverted change
149 # to a file as a modification.
149 # to a file as a modification.
150 modified.append(fn)
150 modified.append(fn)
151 elif self[fn].cmp(other[fn]):
151 elif self[fn].cmp(other[fn]):
152 modified.append(fn)
152 modified.append(fn)
153 else:
153 else:
154 clean.append(fn)
154 clean.append(fn)
155
155
156 if removed:
156 if removed:
157 # need to filter files if they are already reported as removed
157 # need to filter files if they are already reported as removed
158 unknown = [fn for fn in unknown if fn not in mf1]
158 unknown = [fn for fn in unknown if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
160 # if they're deleted, don't report them as removed
160 # if they're deleted, don't report them as removed
161 removed = [fn for fn in removed if fn not in deletedset]
161 removed = [fn for fn in removed if fn not in deletedset]
162
162
163 return scmutil.status(modified, added, removed, deleted, unknown,
163 return scmutil.status(modified, added, removed, deleted, unknown,
164 ignored, clean)
164 ignored, clean)
165
165
166 @propertycache
166 @propertycache
167 def substate(self):
167 def substate(self):
168 return subrepo.state(self, self._repo.ui)
168 return subrepo.state(self, self._repo.ui)
169
169
170 def subrev(self, subpath):
170 def subrev(self, subpath):
171 return self.substate[subpath][1]
171 return self.substate[subpath][1]
172
172
173 def rev(self):
173 def rev(self):
174 return self._rev
174 return self._rev
175 def node(self):
175 def node(self):
176 return self._node
176 return self._node
177 def hex(self):
177 def hex(self):
178 return hex(self.node())
178 return hex(self.node())
179 def manifest(self):
179 def manifest(self):
180 return self._manifest
180 return self._manifest
181 def repo(self):
181 def repo(self):
182 return self._repo
182 return self._repo
183 def phasestr(self):
183 def phasestr(self):
184 return phases.phasenames[self.phase()]
184 return phases.phasenames[self.phase()]
185 def mutable(self):
185 def mutable(self):
186 return self.phase() > phases.public
186 return self.phase() > phases.public
187
187
188 def getfileset(self, expr):
188 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
189 return fileset.getfileset(self, expr)
190
190
191 def obsolete(self):
191 def obsolete(self):
192 """True if the changeset is obsolete"""
192 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
194
195 def extinct(self):
195 def extinct(self):
196 """True if the changeset is extinct"""
196 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
198
199 def unstable(self):
199 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
200 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
202
203 def bumped(self):
203 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
204 """True if the changeset try to be a successor of a public changeset
205
205
206 Only non-public and non-obsolete changesets may be bumped.
206 Only non-public and non-obsolete changesets may be bumped.
207 """
207 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
209
210 def divergent(self):
210 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
211 """Is a successors of a changeset with multiple possible successors set
212
212
213 Only non-public and non-obsolete changesets may be divergent.
213 Only non-public and non-obsolete changesets may be divergent.
214 """
214 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
216
217 def troubled(self):
217 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
218 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
219 return self.unstable() or self.bumped() or self.divergent()
220
220
221 def troubles(self):
221 def troubles(self):
222 """return the list of troubles affecting this changesets.
222 """return the list of troubles affecting this changesets.
223
223
224 Troubles are returned as strings. possible values are:
224 Troubles are returned as strings. Possible values are:
224 Troubles are returned as strings. Possible values are:
225 - unstable,
226 - bumped,
226 - bumped,
227 - divergent.
227 - divergent.
228 """
228 """
229 troubles = []
229 troubles = []
230 if self.unstable():
230 if self.unstable():
231 troubles.append('unstable')
231 troubles.append('unstable')
232 if self.bumped():
232 if self.bumped():
233 troubles.append('bumped')
233 troubles.append('bumped')
234 if self.divergent():
234 if self.divergent():
235 troubles.append('divergent')
235 troubles.append('divergent')
236 return troubles
236 return troubles
237
237
238 def parents(self):
238 def parents(self):
239 """return contexts for each parent changeset"""
239 """return contexts for each parent changeset"""
240 return self._parents
240 return self._parents
241
241
242 def p1(self):
242 def p1(self):
243 return self._parents[0]
243 return self._parents[0]
244
244
245 def p2(self):
245 def p2(self):
246 parents = self._parents
246 parents = self._parents
247 if len(parents) == 2:
247 if len(parents) == 2:
248 return parents[1]
248 return parents[1]
249 return changectx(self._repo, nullrev)
249 return changectx(self._repo, nullrev)
250
250
251 def _fileinfo(self, path):
251 def _fileinfo(self, path):
252 if '_manifest' in self.__dict__:
252 if '_manifest' in self.__dict__:
253 try:
253 try:
254 return self._manifest[path], self._manifest.flags(path)
254 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
255 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
256 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
257 _('not found in manifest'))
258 if '_manifestdelta' in self.__dict__ or path in self.files():
258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
259 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
260 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
261 self._manifestdelta.flags(path))
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
263 if not node:
263 if not node:
264 raise error.ManifestLookupError(self._node, path,
264 raise error.ManifestLookupError(self._node, path,
265 _('not found in manifest'))
265 _('not found in manifest'))
266
266
267 return node, flag
267 return node, flag
268
268
269 def filenode(self, path):
269 def filenode(self, path):
270 return self._fileinfo(path)[0]
270 return self._fileinfo(path)[0]
271
271
272 def flags(self, path):
272 def flags(self, path):
273 try:
273 try:
274 return self._fileinfo(path)[1]
274 return self._fileinfo(path)[1]
275 except error.LookupError:
275 except error.LookupError:
276 return ''
276 return ''
277
277
278 def sub(self, path, allowcreate=True):
278 def sub(self, path, allowcreate=True):
279 '''return a subrepo for the stored revision of path, never wdir()'''
279 '''return a subrepo for the stored revision of path, never wdir()'''
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
281
281
282 def nullsub(self, path, pctx):
282 def nullsub(self, path, pctx):
283 return subrepo.nullsubrepo(self, path, pctx)
283 return subrepo.nullsubrepo(self, path, pctx)
284
284
285 def workingsub(self, path):
285 def workingsub(self, path):
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 context.
287 context.
288 '''
288 '''
289 return subrepo.subrepo(self, path, allowwdir=True)
289 return subrepo.subrepo(self, path, allowwdir=True)
290
290
291 def match(self, pats=[], include=None, exclude=None, default='glob',
291 def match(self, pats=[], include=None, exclude=None, default='glob',
292 listsubrepos=False, badfn=None):
292 listsubrepos=False, badfn=None):
293 r = self._repo
293 r = self._repo
294 return matchmod.match(r.root, r.getcwd(), pats,
294 return matchmod.match(r.root, r.getcwd(), pats,
295 include, exclude, default,
295 include, exclude, default,
296 auditor=r.nofsauditor, ctx=self,
296 auditor=r.nofsauditor, ctx=self,
297 listsubrepos=listsubrepos, badfn=badfn)
297 listsubrepos=listsubrepos, badfn=badfn)
298
298
299 def diff(self, ctx2=None, match=None, **opts):
299 def diff(self, ctx2=None, match=None, **opts):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
305 diffopts = patch.diffopts(self._repo.ui, opts)
305 diffopts = patch.diffopts(self._repo.ui, opts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307
307
308 def dirs(self):
308 def dirs(self):
309 return self._manifest.dirs()
309 return self._manifest.dirs()
310
310
311 def hasdir(self, dir):
311 def hasdir(self, dir):
312 return self._manifest.hasdir(dir)
312 return self._manifest.hasdir(dir)
313
313
314 def dirty(self, missing=False, merge=True, branch=True):
314 def dirty(self, missing=False, merge=True, branch=True):
315 return False
315 return False
316
316
317 def status(self, other=None, match=None, listignored=False,
317 def status(self, other=None, match=None, listignored=False,
318 listclean=False, listunknown=False, listsubrepos=False):
318 listclean=False, listunknown=False, listsubrepos=False):
319 """return status of files between two nodes or node and working
319 """return status of files between two nodes or node and working
320 directory.
320 directory.
321
321
322 If other is None, compare this node with working directory.
322 If other is None, compare this node with working directory.
323
323
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 """
325 """
326
326
327 ctx1 = self
327 ctx1 = self
328 ctx2 = self._repo[other]
328 ctx2 = self._repo[other]
329
329
330 # This next code block is, admittedly, fragile logic that tests for
330 # This next code block is, admittedly, fragile logic that tests for
331 # reversing the contexts and wouldn't need to exist if it weren't for
331 # reversing the contexts and wouldn't need to exist if it weren't for
332 # the fast (and common) code path of comparing the working directory
332 # the fast (and common) code path of comparing the working directory
333 # with its first parent.
333 # with its first parent.
334 #
334 #
335 # What we're aiming for here is the ability to call:
335 # What we're aiming for here is the ability to call:
336 #
336 #
337 # workingctx.status(parentctx)
337 # workingctx.status(parentctx)
338 #
338 #
339 # If we always built the manifest for each context and compared those,
339 # If we always built the manifest for each context and compared those,
340 # then we'd be done. But the special case of the above call means we
340 # then we'd be done. But the special case of the above call means we
341 # just copy the manifest of the parent.
341 # just copy the manifest of the parent.
342 reversed = False
342 reversed = False
343 if (not isinstance(ctx1, changectx)
343 if (not isinstance(ctx1, changectx)
344 and isinstance(ctx2, changectx)):
344 and isinstance(ctx2, changectx)):
345 reversed = True
345 reversed = True
346 ctx1, ctx2 = ctx2, ctx1
346 ctx1, ctx2 = ctx2, ctx1
347
347
348 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
351 listunknown)
352
352
353 if reversed:
353 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
355 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
357 r.clean)
358
358
359 if listsubrepos:
359 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
361 try:
362 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
363 except KeyError:
363 except KeyError:
364 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do is to ignore it.
366 # won't contain that subpath. The best we can do is to ignore it.
367 rev2 = None
367 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
371 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
374
375 for l in r:
375 for l in r:
376 l.sort()
376 l.sort()
377
377
378 return r
378 return r
379
379
380
380
381 def makememctx(repo, parents, text, user, date, branch, files, store,
381 def makememctx(repo, parents, text, user, date, branch, files, store,
382 editor=None, extra=None):
382 editor=None, extra=None):
383 def getfilectx(repo, memctx, path):
383 def getfilectx(repo, memctx, path):
384 data, mode, copied = store.getfile(path)
384 data, mode, copied = store.getfile(path)
385 if data is None:
385 if data is None:
386 return None
386 return None
387 islink, isexec = mode
387 islink, isexec = mode
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 copied=copied, memctx=memctx)
389 copied=copied, memctx=memctx)
390 if extra is None:
390 if extra is None:
391 extra = {}
391 extra = {}
392 if branch:
392 if branch:
393 extra['branch'] = encoding.fromlocal(branch)
393 extra['branch'] = encoding.fromlocal(branch)
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 date, extra, editor)
395 date, extra, editor)
396 return ctx
396 return ctx
397
397
398 class changectx(basectx):
398 class changectx(basectx):
399 """A changecontext object makes access to data related to a particular
399 """A changecontext object makes access to data related to a particular
400 changeset convenient. It represents a read-only context already present in
400 changeset convenient. It represents a read-only context already present in
401 the repo."""
401 the repo."""
402 def __init__(self, repo, changeid=''):
402 def __init__(self, repo, changeid=''):
403 """changeid is a revision number, node, or tag"""
403 """changeid is a revision number, node, or tag"""
404
404
405 # since basectx.__new__ already took care of copying the object, we
405 # since basectx.__new__ already took care of copying the object, we
406 # don't need to do anything in __init__, so we just exit here
406 # don't need to do anything in __init__, so we just exit here
407 if isinstance(changeid, basectx):
407 if isinstance(changeid, basectx):
408 return
408 return
409
409
410 if changeid == '':
410 if changeid == '':
411 changeid = '.'
411 changeid = '.'
412 self._repo = repo
412 self._repo = repo
413
413
414 try:
414 try:
415 if isinstance(changeid, int):
415 if isinstance(changeid, int):
416 self._node = repo.changelog.node(changeid)
416 self._node = repo.changelog.node(changeid)
417 self._rev = changeid
417 self._rev = changeid
418 return
418 return
419 if isinstance(changeid, long):
419 if isinstance(changeid, long):
420 changeid = str(changeid)
420 changeid = str(changeid)
421 if changeid == 'null':
421 if changeid == 'null':
422 self._node = nullid
422 self._node = nullid
423 self._rev = nullrev
423 self._rev = nullrev
424 return
424 return
425 if changeid == 'tip':
425 if changeid == 'tip':
426 self._node = repo.changelog.tip()
426 self._node = repo.changelog.tip()
427 self._rev = repo.changelog.rev(self._node)
427 self._rev = repo.changelog.rev(self._node)
428 return
428 return
429 if changeid == '.' or changeid == repo.dirstate.p1():
429 if changeid == '.' or changeid == repo.dirstate.p1():
430 # this is a hack to delay/avoid loading obsmarkers
430 # this is a hack to delay/avoid loading obsmarkers
431 # when we know that '.' won't be hidden
431 # when we know that '.' won't be hidden
432 self._node = repo.dirstate.p1()
432 self._node = repo.dirstate.p1()
433 self._rev = repo.unfiltered().changelog.rev(self._node)
433 self._rev = repo.unfiltered().changelog.rev(self._node)
434 return
434 return
435 if len(changeid) == 20:
435 if len(changeid) == 20:
436 try:
436 try:
437 self._node = changeid
437 self._node = changeid
438 self._rev = repo.changelog.rev(changeid)
438 self._rev = repo.changelog.rev(changeid)
439 return
439 return
440 except error.FilteredRepoLookupError:
440 except error.FilteredRepoLookupError:
441 raise
441 raise
442 except LookupError:
442 except LookupError:
443 pass
443 pass
444
444
445 try:
445 try:
446 r = int(changeid)
446 r = int(changeid)
447 if str(r) != changeid:
447 if str(r) != changeid:
448 raise ValueError
448 raise ValueError
449 l = len(repo.changelog)
449 l = len(repo.changelog)
450 if r < 0:
450 if r < 0:
451 r += l
451 r += l
452 if r < 0 or r >= l:
452 if r < 0 or r >= l:
453 raise ValueError
453 raise ValueError
454 self._rev = r
454 self._rev = r
455 self._node = repo.changelog.node(r)
455 self._node = repo.changelog.node(r)
456 return
456 return
457 except error.FilteredIndexError:
457 except error.FilteredIndexError:
458 raise
458 raise
459 except (ValueError, OverflowError, IndexError):
459 except (ValueError, OverflowError, IndexError):
460 pass
460 pass
461
461
462 if len(changeid) == 40:
462 if len(changeid) == 40:
463 try:
463 try:
464 self._node = bin(changeid)
464 self._node = bin(changeid)
465 self._rev = repo.changelog.rev(self._node)
465 self._rev = repo.changelog.rev(self._node)
466 return
466 return
467 except error.FilteredLookupError:
467 except error.FilteredLookupError:
468 raise
468 raise
469 except (TypeError, LookupError):
469 except (TypeError, LookupError):
470 pass
470 pass
471
471
472 # lookup bookmarks through the name interface
472 # lookup bookmarks through the name interface
473 try:
473 try:
474 self._node = repo.names.singlenode(repo, changeid)
474 self._node = repo.names.singlenode(repo, changeid)
475 self._rev = repo.changelog.rev(self._node)
475 self._rev = repo.changelog.rev(self._node)
476 return
476 return
477 except KeyError:
477 except KeyError:
478 pass
478 pass
479 except error.FilteredRepoLookupError:
479 except error.FilteredRepoLookupError:
480 raise
480 raise
481 except error.RepoLookupError:
481 except error.RepoLookupError:
482 pass
482 pass
483
483
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 if self._node is not None:
485 if self._node is not None:
486 self._rev = repo.changelog.rev(self._node)
486 self._rev = repo.changelog.rev(self._node)
487 return
487 return
488
488
489 # lookup failed
489 # lookup failed
490 # check if it might have come from damaged dirstate
490 # check if it might have come from damaged dirstate
491 #
491 #
492 # XXX we could avoid the unfiltered if we had a recognizable
492 # XXX we could avoid the unfiltered if we had a recognizable
493 # exception for filtered changeset access
493 # exception for filtered changeset access
494 if changeid in repo.unfiltered().dirstate.parents():
494 if changeid in repo.unfiltered().dirstate.parents():
495 msg = _("working directory has unknown parent '%s'!")
495 msg = _("working directory has unknown parent '%s'!")
496 raise error.Abort(msg % short(changeid))
496 raise error.Abort(msg % short(changeid))
497 try:
497 try:
498 if len(changeid) == 20 and nonascii(changeid):
498 if len(changeid) == 20 and nonascii(changeid):
499 changeid = hex(changeid)
499 changeid = hex(changeid)
500 except TypeError:
500 except TypeError:
501 pass
501 pass
502 except (error.FilteredIndexError, error.FilteredLookupError,
502 except (error.FilteredIndexError, error.FilteredLookupError,
503 error.FilteredRepoLookupError):
503 error.FilteredRepoLookupError):
504 if repo.filtername.startswith('visible'):
504 if repo.filtername.startswith('visible'):
505 msg = _("hidden revision '%s'") % changeid
505 msg = _("hidden revision '%s'") % changeid
506 hint = _('use --hidden to access hidden revisions')
506 hint = _('use --hidden to access hidden revisions')
507 raise error.FilteredRepoLookupError(msg, hint=hint)
507 raise error.FilteredRepoLookupError(msg, hint=hint)
508 msg = _("filtered revision '%s' (not in '%s' subset)")
508 msg = _("filtered revision '%s' (not in '%s' subset)")
509 msg %= (changeid, repo.filtername)
509 msg %= (changeid, repo.filtername)
510 raise error.FilteredRepoLookupError(msg)
510 raise error.FilteredRepoLookupError(msg)
511 except IndexError:
511 except IndexError:
512 pass
512 pass
513 raise error.RepoLookupError(
513 raise error.RepoLookupError(
514 _("unknown revision '%s'") % changeid)
514 _("unknown revision '%s'") % changeid)
515
515
516 def __hash__(self):
516 def __hash__(self):
517 try:
517 try:
518 return hash(self._rev)
518 return hash(self._rev)
519 except AttributeError:
519 except AttributeError:
520 return id(self)
520 return id(self)
521
521
522 def __nonzero__(self):
522 def __nonzero__(self):
523 return self._rev != nullrev
523 return self._rev != nullrev
524
524
525 @propertycache
525 @propertycache
526 def _changeset(self):
526 def _changeset(self):
527 return self._repo.changelog.changelogrevision(self.rev())
527 return self._repo.changelog.changelogrevision(self.rev())
528
528
529 @propertycache
529 @propertycache
530 def _manifest(self):
530 def _manifest(self):
531 return self._repo.manifestlog[self._changeset.manifest].read()
531 return self._repo.manifestlog[self._changeset.manifest].read()
532
532
533 @propertycache
533 @propertycache
534 def _manifestdelta(self):
534 def _manifestdelta(self):
535 mfnode = self._changeset.manifest
535 mfnode = self._changeset.manifest
536 return self._repo.manifestlog[mfnode].readdelta()
536 return self._repo.manifestlog[mfnode].readdelta()
537
537
538 @propertycache
538 @propertycache
539 def _parents(self):
539 def _parents(self):
540 repo = self._repo
540 repo = self._repo
541 p1, p2 = repo.changelog.parentrevs(self._rev)
541 p1, p2 = repo.changelog.parentrevs(self._rev)
542 if p2 == nullrev:
542 if p2 == nullrev:
543 return [changectx(repo, p1)]
543 return [changectx(repo, p1)]
544 return [changectx(repo, p1), changectx(repo, p2)]
544 return [changectx(repo, p1), changectx(repo, p2)]
545
545
546 def changeset(self):
546 def changeset(self):
547 c = self._changeset
547 c = self._changeset
548 return (
548 return (
549 c.manifest,
549 c.manifest,
550 c.user,
550 c.user,
551 c.date,
551 c.date,
552 c.files,
552 c.files,
553 c.description,
553 c.description,
554 c.extra,
554 c.extra,
555 )
555 )
556 def manifestnode(self):
556 def manifestnode(self):
557 return self._changeset.manifest
557 return self._changeset.manifest
558
558
559 def user(self):
559 def user(self):
560 return self._changeset.user
560 return self._changeset.user
561 def date(self):
561 def date(self):
562 return self._changeset.date
562 return self._changeset.date
563 def files(self):
563 def files(self):
564 return self._changeset.files
564 return self._changeset.files
565 def description(self):
565 def description(self):
566 return self._changeset.description
566 return self._changeset.description
567 def branch(self):
567 def branch(self):
568 return encoding.tolocal(self._changeset.extra.get("branch"))
568 return encoding.tolocal(self._changeset.extra.get("branch"))
569 def closesbranch(self):
569 def closesbranch(self):
570 return 'close' in self._changeset.extra
570 return 'close' in self._changeset.extra
571 def extra(self):
571 def extra(self):
572 return self._changeset.extra
572 return self._changeset.extra
573 def tags(self):
573 def tags(self):
574 return self._repo.nodetags(self._node)
574 return self._repo.nodetags(self._node)
575 def bookmarks(self):
575 def bookmarks(self):
576 return self._repo.nodebookmarks(self._node)
576 return self._repo.nodebookmarks(self._node)
577 def phase(self):
577 def phase(self):
578 return self._repo._phasecache.phase(self._repo, self._rev)
578 return self._repo._phasecache.phase(self._repo, self._rev)
579 def hidden(self):
579 def hidden(self):
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
581
581
582 def children(self):
582 def children(self):
583 """return contexts for each child changeset"""
583 """return contexts for each child changeset"""
584 c = self._repo.changelog.children(self._node)
584 c = self._repo.changelog.children(self._node)
585 return [changectx(self._repo, x) for x in c]
585 return [changectx(self._repo, x) for x in c]
586
586
587 def ancestors(self):
587 def ancestors(self):
588 for a in self._repo.changelog.ancestors([self._rev]):
588 for a in self._repo.changelog.ancestors([self._rev]):
589 yield changectx(self._repo, a)
589 yield changectx(self._repo, a)
590
590
591 def descendants(self):
591 def descendants(self):
592 for d in self._repo.changelog.descendants([self._rev]):
592 for d in self._repo.changelog.descendants([self._rev]):
593 yield changectx(self._repo, d)
593 yield changectx(self._repo, d)
594
594
595 def filectx(self, path, fileid=None, filelog=None):
595 def filectx(self, path, fileid=None, filelog=None):
596 """get a file context from this changeset"""
596 """get a file context from this changeset"""
597 if fileid is None:
597 if fileid is None:
598 fileid = self.filenode(path)
598 fileid = self.filenode(path)
599 return filectx(self._repo, path, fileid=fileid,
599 return filectx(self._repo, path, fileid=fileid,
600 changectx=self, filelog=filelog)
600 changectx=self, filelog=filelog)
601
601
602 def ancestor(self, c2, warn=False):
602 def ancestor(self, c2, warn=False):
603 """return the "best" ancestor context of self and c2
603 """return the "best" ancestor context of self and c2
604
604
605 If there are multiple candidates, it will show a message and check
605 If there are multiple candidates, it will show a message and check
606 merge.preferancestor configuration before falling back to the
606 merge.preferancestor configuration before falling back to the
607 revlog ancestor."""
607 revlog ancestor."""
608 # deal with workingctxs
608 # deal with workingctxs
609 n2 = c2._node
609 n2 = c2._node
610 if n2 is None:
610 if n2 is None:
611 n2 = c2._parents[0]._node
611 n2 = c2._parents[0]._node
612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
613 if not cahs:
613 if not cahs:
614 anc = nullid
614 anc = nullid
615 elif len(cahs) == 1:
615 elif len(cahs) == 1:
616 anc = cahs[0]
616 anc = cahs[0]
617 else:
617 else:
618 # experimental config: merge.preferancestor
618 # experimental config: merge.preferancestor
619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
620 try:
620 try:
621 ctx = changectx(self._repo, r)
621 ctx = changectx(self._repo, r)
622 except error.RepoLookupError:
622 except error.RepoLookupError:
623 continue
623 continue
624 anc = ctx.node()
624 anc = ctx.node()
625 if anc in cahs:
625 if anc in cahs:
626 break
626 break
627 else:
627 else:
628 anc = self._repo.changelog.ancestor(self._node, n2)
628 anc = self._repo.changelog.ancestor(self._node, n2)
629 if warn:
629 if warn:
630 self._repo.ui.status(
630 self._repo.ui.status(
631 (_("note: using %s as ancestor of %s and %s\n") %
631 (_("note: using %s as ancestor of %s and %s\n") %
632 (short(anc), short(self._node), short(n2))) +
632 (short(anc), short(self._node), short(n2))) +
633 ''.join(_(" alternatively, use --config "
633 ''.join(_(" alternatively, use --config "
634 "merge.preferancestor=%s\n") %
634 "merge.preferancestor=%s\n") %
635 short(n) for n in sorted(cahs) if n != anc))
635 short(n) for n in sorted(cahs) if n != anc))
636 return changectx(self._repo, anc)
636 return changectx(self._repo, anc)
637
637
638 def descendant(self, other):
638 def descendant(self, other):
639 """True if other is descendant of this changeset"""
639 """True if other is descendant of this changeset"""
640 return self._repo.changelog.descendant(self._rev, other._rev)
640 return self._repo.changelog.descendant(self._rev, other._rev)
641
641
642 def walk(self, match):
642 def walk(self, match):
643 '''Generates matching file names.'''
643 '''Generates matching file names.'''
644
644
645 # Wrap match.bad method to have message with nodeid
645 # Wrap match.bad method to have message with nodeid
646 def bad(fn, msg):
646 def bad(fn, msg):
647 # The manifest doesn't know about subrepos, so don't complain about
647 # The manifest doesn't know about subrepos, so don't complain about
648 # paths into valid subrepos.
648 # paths into valid subrepos.
649 if any(fn == s or fn.startswith(s + '/')
649 if any(fn == s or fn.startswith(s + '/')
650 for s in self.substate):
650 for s in self.substate):
651 return
651 return
652 match.bad(fn, _('no such file in rev %s') % self)
652 match.bad(fn, _('no such file in rev %s') % self)
653
653
654 m = matchmod.badmatch(match, bad)
654 m = matchmod.badmatch(match, bad)
655 return self._manifest.walk(m)
655 return self._manifest.walk(m)
656
656
657 def matches(self, match):
657 def matches(self, match):
658 return self.walk(match)
658 return self.walk(match)
659
659
660 class basefilectx(object):
660 class basefilectx(object):
661 """A filecontext object represents the common logic for its children:
661 """A filecontext object represents the common logic for its children:
662 filectx: read-only access to a filerevision that is already present
662 filectx: read-only access to a filerevision that is already present
663 in the repo,
663 in the repo,
664 workingfilectx: a filecontext that represents files from the working
664 workingfilectx: a filecontext that represents files from the working
665 directory,
665 directory,
666 memfilectx: a filecontext that represents files in-memory."""
666 memfilectx: a filecontext that represents files in-memory."""
667 def __new__(cls, repo, path, *args, **kwargs):
667 def __new__(cls, repo, path, *args, **kwargs):
668 return super(basefilectx, cls).__new__(cls)
668 return super(basefilectx, cls).__new__(cls)
669
669
670 @propertycache
670 @propertycache
671 def _filelog(self):
671 def _filelog(self):
672 return self._repo.file(self._path)
672 return self._repo.file(self._path)
673
673
674 @propertycache
674 @propertycache
675 def _changeid(self):
675 def _changeid(self):
676 if '_changeid' in self.__dict__:
676 if '_changeid' in self.__dict__:
677 return self._changeid
677 return self._changeid
678 elif '_changectx' in self.__dict__:
678 elif '_changectx' in self.__dict__:
679 return self._changectx.rev()
679 return self._changectx.rev()
680 elif '_descendantrev' in self.__dict__:
680 elif '_descendantrev' in self.__dict__:
681 # this file context was created from a revision with a known
681 # this file context was created from a revision with a known
682 # descendant, we can (lazily) correct for linkrev aliases
682 # descendant, we can (lazily) correct for linkrev aliases
683 return self._adjustlinkrev(self._path, self._filelog,
683 return self._adjustlinkrev(self._path, self._filelog,
684 self._filenode, self._descendantrev)
684 self._filenode, self._descendantrev)
685 else:
685 else:
686 return self._filelog.linkrev(self._filerev)
686 return self._filelog.linkrev(self._filerev)
687
687
688 @propertycache
688 @propertycache
689 def _filenode(self):
689 def _filenode(self):
690 if '_fileid' in self.__dict__:
690 if '_fileid' in self.__dict__:
691 return self._filelog.lookup(self._fileid)
691 return self._filelog.lookup(self._fileid)
692 else:
692 else:
693 return self._changectx.filenode(self._path)
693 return self._changectx.filenode(self._path)
694
694
695 @propertycache
695 @propertycache
696 def _filerev(self):
696 def _filerev(self):
697 return self._filelog.rev(self._filenode)
697 return self._filelog.rev(self._filenode)
698
698
699 @propertycache
699 @propertycache
700 def _repopath(self):
700 def _repopath(self):
701 return self._path
701 return self._path
702
702
703 def __nonzero__(self):
703 def __nonzero__(self):
704 try:
704 try:
705 self._filenode
705 self._filenode
706 return True
706 return True
707 except error.LookupError:
707 except error.LookupError:
708 # file is missing
708 # file is missing
709 return False
709 return False
710
710
711 def __str__(self):
711 def __str__(self):
712 return "%s@%s" % (self.path(), self._changectx)
712 try:
713 return "%s@%s" % (self.path(), self._changectx)
714 except error.LookupError:
715 return "%s@???" % self.path()
713
716
714 def __repr__(self):
717 def __repr__(self):
715 return "<%s %s>" % (type(self).__name__, str(self))
718 return "<%s %s>" % (type(self).__name__, str(self))
716
719
717 def __hash__(self):
720 def __hash__(self):
718 try:
721 try:
719 return hash((self._path, self._filenode))
722 return hash((self._path, self._filenode))
720 except AttributeError:
723 except AttributeError:
721 return id(self)
724 return id(self)
722
725
723 def __eq__(self, other):
726 def __eq__(self, other):
724 try:
727 try:
725 return (type(self) == type(other) and self._path == other._path
728 return (type(self) == type(other) and self._path == other._path
726 and self._filenode == other._filenode)
729 and self._filenode == other._filenode)
727 except AttributeError:
730 except AttributeError:
728 return False
731 return False
729
732
730 def __ne__(self, other):
733 def __ne__(self, other):
731 return not (self == other)
734 return not (self == other)
732
735
733 def filerev(self):
736 def filerev(self):
734 return self._filerev
737 return self._filerev
735 def filenode(self):
738 def filenode(self):
736 return self._filenode
739 return self._filenode
737 def flags(self):
740 def flags(self):
738 return self._changectx.flags(self._path)
741 return self._changectx.flags(self._path)
739 def filelog(self):
742 def filelog(self):
740 return self._filelog
743 return self._filelog
741 def rev(self):
744 def rev(self):
742 return self._changeid
745 return self._changeid
743 def linkrev(self):
746 def linkrev(self):
744 return self._filelog.linkrev(self._filerev)
747 return self._filelog.linkrev(self._filerev)
745 def node(self):
748 def node(self):
746 return self._changectx.node()
749 return self._changectx.node()
747 def hex(self):
750 def hex(self):
748 return self._changectx.hex()
751 return self._changectx.hex()
749 def user(self):
752 def user(self):
750 return self._changectx.user()
753 return self._changectx.user()
751 def date(self):
754 def date(self):
752 return self._changectx.date()
755 return self._changectx.date()
753 def files(self):
756 def files(self):
754 return self._changectx.files()
757 return self._changectx.files()
755 def description(self):
758 def description(self):
756 return self._changectx.description()
759 return self._changectx.description()
757 def branch(self):
760 def branch(self):
758 return self._changectx.branch()
761 return self._changectx.branch()
759 def extra(self):
762 def extra(self):
760 return self._changectx.extra()
763 return self._changectx.extra()
761 def phase(self):
764 def phase(self):
762 return self._changectx.phase()
765 return self._changectx.phase()
763 def phasestr(self):
766 def phasestr(self):
764 return self._changectx.phasestr()
767 return self._changectx.phasestr()
765 def manifest(self):
768 def manifest(self):
766 return self._changectx.manifest()
769 return self._changectx.manifest()
767 def changectx(self):
770 def changectx(self):
768 return self._changectx
771 return self._changectx
769 def repo(self):
772 def repo(self):
770 return self._repo
773 return self._repo
771
774
772 def path(self):
775 def path(self):
773 return self._path
776 return self._path
774
777
775 def isbinary(self):
778 def isbinary(self):
776 try:
779 try:
777 return util.binary(self.data())
780 return util.binary(self.data())
778 except IOError:
781 except IOError:
779 return False
782 return False
780 def isexec(self):
783 def isexec(self):
781 return 'x' in self.flags()
784 return 'x' in self.flags()
782 def islink(self):
785 def islink(self):
783 return 'l' in self.flags()
786 return 'l' in self.flags()
784
787
785 def isabsent(self):
788 def isabsent(self):
786 """whether this filectx represents a file not in self._changectx
789 """whether this filectx represents a file not in self._changectx
787
790
788 This is mainly for merge code to detect change/delete conflicts. This is
791 This is mainly for merge code to detect change/delete conflicts. This is
789 expected to be True for all subclasses of basectx."""
792 expected to be True for all subclasses of basectx."""
790 return False
793 return False
791
794
792 _customcmp = False
795 _customcmp = False
793 def cmp(self, fctx):
796 def cmp(self, fctx):
794 """compare with other file context
797 """compare with other file context
795
798
796 returns True if different than fctx.
799 returns True if different than fctx.
797 """
800 """
798 if fctx._customcmp:
801 if fctx._customcmp:
799 return fctx.cmp(self)
802 return fctx.cmp(self)
800
803
801 if (fctx._filenode is None
804 if (fctx._filenode is None
802 and (self._repo._encodefilterpats
805 and (self._repo._encodefilterpats
803 # if file data starts with '\1\n', empty metadata block is
806 # if file data starts with '\1\n', empty metadata block is
804 # prepended, which adds 4 bytes to filelog.size().
807 # prepended, which adds 4 bytes to filelog.size().
805 or self.size() - 4 == fctx.size())
808 or self.size() - 4 == fctx.size())
806 or self.size() == fctx.size()):
809 or self.size() == fctx.size()):
807 return self._filelog.cmp(self._filenode, fctx.data())
810 return self._filelog.cmp(self._filenode, fctx.data())
808
811
809 return True
812 return True
810
813
811 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
814 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
812 """return the first ancestor of <srcrev> introducing <fnode>
815 """return the first ancestor of <srcrev> introducing <fnode>
813
816
814 If the linkrev of the file revision does not point to an ancestor of
817 If the linkrev of the file revision does not point to an ancestor of
815 srcrev, we'll walk down the ancestors until we find one introducing
818 srcrev, we'll walk down the ancestors until we find one introducing
816 this file revision.
819 this file revision.
817
820
818 :repo: a localrepository object (used to access changelog and manifest)
821 :repo: a localrepository object (used to access changelog and manifest)
819 :path: the file path
822 :path: the file path
820 :fnode: the nodeid of the file revision
823 :fnode: the nodeid of the file revision
821 :filelog: the filelog of this path
824 :filelog: the filelog of this path
822 :srcrev: the changeset revision we search ancestors from
825 :srcrev: the changeset revision we search ancestors from
823 :inclusive: if true, the src revision will also be checked
826 :inclusive: if true, the src revision will also be checked
824 """
827 """
825 repo = self._repo
828 repo = self._repo
826 cl = repo.unfiltered().changelog
829 cl = repo.unfiltered().changelog
827 mfl = repo.manifestlog
830 mfl = repo.manifestlog
828 # fetch the linkrev
831 # fetch the linkrev
829 fr = filelog.rev(fnode)
832 fr = filelog.rev(fnode)
830 lkr = filelog.linkrev(fr)
833 lkr = filelog.linkrev(fr)
831 # hack to reuse ancestor computation when searching for renames
834 # hack to reuse ancestor computation when searching for renames
832 memberanc = getattr(self, '_ancestrycontext', None)
835 memberanc = getattr(self, '_ancestrycontext', None)
833 iteranc = None
836 iteranc = None
834 if srcrev is None:
837 if srcrev is None:
835 # wctx case, used by workingfilectx during mergecopy
838 # wctx case, used by workingfilectx during mergecopy
836 revs = [p.rev() for p in self._repo[None].parents()]
839 revs = [p.rev() for p in self._repo[None].parents()]
837 inclusive = True # we skipped the real (revless) source
840 inclusive = True # we skipped the real (revless) source
838 else:
841 else:
839 revs = [srcrev]
842 revs = [srcrev]
840 if memberanc is None:
843 if memberanc is None:
841 memberanc = iteranc = cl.ancestors(revs, lkr,
844 memberanc = iteranc = cl.ancestors(revs, lkr,
842 inclusive=inclusive)
845 inclusive=inclusive)
843 # check if this linkrev is an ancestor of srcrev
846 # check if this linkrev is an ancestor of srcrev
844 if lkr not in memberanc:
847 if lkr not in memberanc:
845 if iteranc is None:
848 if iteranc is None:
846 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
849 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
847 for a in iteranc:
850 for a in iteranc:
848 ac = cl.read(a) # get changeset data (we avoid object creation)
851 ac = cl.read(a) # get changeset data (we avoid object creation)
849 if path in ac[3]: # checking the 'files' field.
852 if path in ac[3]: # checking the 'files' field.
850 # The file has been touched, check if the content is
853 # The file has been touched, check if the content is
851 # similar to the one we search for.
854 # similar to the one we search for.
852 if fnode == mfl[ac[0]].readfast().get(path):
855 if fnode == mfl[ac[0]].readfast().get(path):
853 return a
856 return a
854 # In theory, we should never get out of that loop without a result.
857 # In theory, we should never get out of that loop without a result.
855 # But if manifest uses a buggy file revision (not children of the
858 # But if manifest uses a buggy file revision (not children of the
856 # one it replaces) we could. Such a buggy situation will likely
859 # one it replaces) we could. Such a buggy situation will likely
857 # result in a crash somewhere else at some point.
860 # result in a crash somewhere else at some point.
858 return lkr
861 return lkr
859
862
860 def introrev(self):
863 def introrev(self):
861 """return the rev of the changeset which introduced this file revision
864 """return the rev of the changeset which introduced this file revision
862
865
863 This method is different from linkrev because it takes into account the
866 This method is different from linkrev because it takes into account the
864 changeset the filectx was created from. It ensures the returned
867 changeset the filectx was created from. It ensures the returned
865 revision is one of its ancestors. This prevents bugs from
868 revision is one of its ancestors. This prevents bugs from
866 'linkrev-shadowing' when a file revision is used by multiple
869 'linkrev-shadowing' when a file revision is used by multiple
867 changesets.
870 changesets.
868 """
871 """
869 lkr = self.linkrev()
872 lkr = self.linkrev()
870 attrs = vars(self)
873 attrs = vars(self)
871 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
874 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
872 if noctx or self.rev() == lkr:
875 if noctx or self.rev() == lkr:
873 return self.linkrev()
876 return self.linkrev()
874 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
877 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
875 self.rev(), inclusive=True)
878 self.rev(), inclusive=True)
876
879
877 def _parentfilectx(self, path, fileid, filelog):
880 def _parentfilectx(self, path, fileid, filelog):
878 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
879 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
880 if '_changeid' in vars(self) or '_changectx' in vars(self):
883 if '_changeid' in vars(self) or '_changectx' in vars(self):
881 # If self is associated with a changeset (probably explicitly
884 # If self is associated with a changeset (probably explicitly
882 # fed), ensure the created filectx is associated with a
885 # fed), ensure the created filectx is associated with a
883 # changeset that is an ancestor of self.changectx.
886 # changeset that is an ancestor of self.changectx.
884 # This lets us later use _adjustlinkrev to get a correct link.
887 # This lets us later use _adjustlinkrev to get a correct link.
885 fctx._descendantrev = self.rev()
888 fctx._descendantrev = self.rev()
886 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
887 elif '_descendantrev' in vars(self):
890 elif '_descendantrev' in vars(self):
888 # Otherwise propagate _descendantrev if we have one associated.
891 # Otherwise propagate _descendantrev if we have one associated.
889 fctx._descendantrev = self._descendantrev
892 fctx._descendantrev = self._descendantrev
890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 return fctx
894 return fctx
892
895
893 def parents(self):
896 def parents(self):
894 _path = self._path
897 _path = self._path
895 fl = self._filelog
898 fl = self._filelog
896 parents = self._filelog.parents(self._filenode)
899 parents = self._filelog.parents(self._filenode)
897 pl = [(_path, node, fl) for node in parents if node != nullid]
900 pl = [(_path, node, fl) for node in parents if node != nullid]
898
901
899 r = fl.renamed(self._filenode)
902 r = fl.renamed(self._filenode)
900 if r:
903 if r:
901 # - In the simple rename case, both parents are nullid, pl is empty.
904 # - In the simple rename case, both parents are nullid, pl is empty.
902 # - In case of merge, only one of the parents is nullid and should
905 # - In case of merge, only one of the parents is nullid and should
903 # be replaced with the rename information. This parent is -always-
906 # be replaced with the rename information. This parent is -always-
904 # the first one.
907 # the first one.
905 #
908 #
906 # As nullid parents have always been filtered out by the previous list
909 # As nullid parents have always been filtered out by the previous list
907 # comprehension, inserting at index 0 will always result in replacing
910 # comprehension, inserting at index 0 will always result in replacing
908 # the first nullid parent with the rename information.
911 # the first nullid parent with the rename information.
909 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
910
913
911 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=False, diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                for p in pl:
                    curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
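
    # Example (illustrative sketch): consuming the annotate() output above.
    # 'fctx' is assumed to be a filectx for a tracked file, obtained
    # elsewhere (for instance from a changectx).
    #
    #     for (actx, lineno), line in fctx.annotate(linenumber=True):
    #         print('%d:%d: %s' % (actx.rev(), lineno, line.rstrip('\n')))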

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
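
    # Example (illustrative sketch): two common ways to obtain a filectx.
    # 'repo' is assumed to be a localrepository and 'a.txt' a file tracked
    # in the 'tip' revision.
    #
    #     fctx = repo['tip']['a.txt']                    # via a changectx
    #     fctx = filectx(repo, 'a.txt', changeid='tip')  # direct construction
    #     data = fctx.data()
    #     parentrevs = [p.rev() for p in fctx.parents()]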

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but we append an extra
        letter when modified. Modified files get an extra 'm' while added
        files get an extra 'a'. This is used by manifest merging to see that
        files are different and by update logic to avoid deleting newly added
        files.
        """
        parents = self.parents()

        man1 = parents[0].manifest()
        man = man1.copy()
        if len(parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
               or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=[], include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        if not util.fscasesensitive(r.root):
            return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                           exclude, default, r.auditor, self,
                                           listsubrepos=listsubrepos,
                                           badfn=badfn)
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
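
    # Example (illustrative sketch): the working directory context is
    # normally obtained as repo[None]; 'repo' and 'newfile.txt' are assumed.
    #
    #     wctx = repo[None]
    #     if wctx.dirty(missing=True):
    #         rejected = wctx.add(['newfile.txt'])
    #     st = wctx.status()    # modified/added/removed/deleted lists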

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
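
    # Example (illustrative sketch): reading and rewriting a file through a
    # workingfilectx. 'repo' and 'a.txt' are assumed; the file must exist in
    # the working directory.
    #
    #     wfctx = repo[None]['a.txt']
    #     olddata = wfctx.data()
    #     wfctx.write(olddata + 'appended line\n', wfctx.flags())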

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
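
# Example (illustrative sketch): wrapping an expensive filectxfn with
# makecachingfilectxfn before handing it to memctx. 'repo' and the file
# contents are assumed; memfilectx is assumed to be available from this
# module.
#
#     def _filectxfn(repo, memctx, path):
#         return memfilectx(repo, path, 'contents of %s\n' % path)
#
#     mctx = memctx(repo, (repo['.'].node(), None), 'example commit',
#                   ['a.txt'], makecachingfilectxfn(_filectxfn),
#                   user='someone <someone@example.com>')
#     newnode = repo.commitctx(mctx)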
1809
1812
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related files data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date, and
    extra is a dictionary of metadata or is left empty.
    """

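    # A hedged example of the callback protocol described above ('repo' is an
    # existing localrepo; paths and content are made up). Returning None for a
    # removed file relies on _returnnoneformissingfiles below; older code
    # raised IOError instead.
    #
    #   def filectxfn(repo, memctx, path):
    #       if path == 'removed.txt':
    #           return None
    #       return memfilectx(repo, path, 'new content of %s\n' % path,
    #                         memctx=memctx)
    #
    #   ctx = memctx(repo, [repo['.'].node(), None], 'in-memory commit',
    #                ['touched.txt', 'removed.txt'], filectxfn,
    #                user='someone <someone@example.com>')
    #   newnode = ctx.commit()
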
    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

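    # Hedged note on the non-callable branch above: 'filectxfn' may instead be
    # a mapping from path to an existing filectx-like object, which is then
    # wrapped on the fly. For example (illustrative only):
    #
    #   store = {'foo.txt': repo['.']['foo.txt']}
    #   ctx = memctx(repo, [repo['.'].node(), None], 'recommit foo.txt as-is',
    #                ['foo.txt'], store)
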
    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
            if len(p) > 1:
                p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

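    # For reference, a hedged sketch of what revlog.hash() is computing above:
    # conceptually the standard Mercurial file nodeid, a sha1 over the sorted
    # parent nodes followed by the file text.
    #
    #   import hashlib
    #   def filenodeid(text, p1node, p2node):
    #       lo, hi = sorted([p1node, p2node])
    #       return hashlib.sha1(lo + hi + text).digest()
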
    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized with a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

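    # Hedged example of the copy convention mentioned in memctx's docstring
    # (paths are illustrative): a rename of 'old.txt' to 'new.txt' is the
    # source marked removed in 'files' plus a new memfilectx carrying the
    # copy source.
    #
    #   fctx = memfilectx(repo, 'new.txt', 'contents\n',
    #                     copied='old.txt', memctx=ctx)
    #   fctx.renamed()   # -> ('old.txt', nullid)
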
    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data