context: introduce the nullsub() method...
Matt Harbison
r25417:95c27135 default
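
The patch adds a nullsub() accessor to basectx, next to the existing sub() method, delegating to subrepo.nullsubrepo(). As a rough usage sketch (hypothetical caller code, not part of this changeset; it assumes nullsubrepo() yields an empty subrepo pinned to the null revision, e.g. for comparing against a context in which the subrepo does not yet exist):

    # Hypothetical helper for illustration only; not part of r25417.
    # ctx:  any basectx subclass (changectx, workingctx, memctx)
    # pctx: the context whose subrepo configuration should apply
    def subrepo_or_empty(ctx, path, pctx):
        if path in ctx.substate:
            # the subrepo exists in this context; use the normal accessor
            return ctx.sub(path)
        # otherwise fall back to the new nullsub(), which wraps
        # subrepo.nullsubrepo(ctx, path, pctx)
        return ctx.nullsub(path, pctx)
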
@@ -1,1910 +1,1913 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import copy, os, errno, stat
12 import copy, os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand-in for new files in some uses of
20 # Phony node value to stand-in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
25 class basectx(object):
25 class basectx(object):
26 """A basectx object represents the common logic for its children:
26 """A basectx object represents the common logic for its children:
27 changectx: read-only context that is already present in the repo,
27 changectx: read-only context that is already present in the repo,
28 workingctx: a context that represents the working directory and can
28 workingctx: a context that represents the working directory and can
29 be committed,
29 be committed,
30 memctx: a context that represents changes in-memory and can also
30 memctx: a context that represents changes in-memory and can also
31 be committed."""
31 be committed."""
32 def __new__(cls, repo, changeid='', *args, **kwargs):
32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 if isinstance(changeid, basectx):
33 if isinstance(changeid, basectx):
34 return changeid
34 return changeid
35
35
36 o = super(basectx, cls).__new__(cls)
36 o = super(basectx, cls).__new__(cls)
37
37
38 o._repo = repo
38 o._repo = repo
39 o._rev = nullrev
39 o._rev = nullrev
40 o._node = nullid
40 o._node = nullid
41
41
42 return o
42 return o
43
43
44 def __str__(self):
44 def __str__(self):
45 return short(self.node())
45 return short(self.node())
46
46
47 def __int__(self):
47 def __int__(self):
48 return self.rev()
48 return self.rev()
49
49
50 def __repr__(self):
50 def __repr__(self):
51 return "<%s %s>" % (type(self).__name__, str(self))
51 return "<%s %s>" % (type(self).__name__, str(self))
52
52
53 def __eq__(self, other):
53 def __eq__(self, other):
54 try:
54 try:
55 return type(self) == type(other) and self._rev == other._rev
55 return type(self) == type(other) and self._rev == other._rev
56 except AttributeError:
56 except AttributeError:
57 return False
57 return False
58
58
59 def __ne__(self, other):
59 def __ne__(self, other):
60 return not (self == other)
60 return not (self == other)
61
61
62 def __contains__(self, key):
62 def __contains__(self, key):
63 return key in self._manifest
63 return key in self._manifest
64
64
65 def __getitem__(self, key):
65 def __getitem__(self, key):
66 return self.filectx(key)
66 return self.filectx(key)
67
67
68 def __iter__(self):
68 def __iter__(self):
69 return iter(self._manifest)
69 return iter(self._manifest)
70
70
71 def _manifestmatches(self, match, s):
71 def _manifestmatches(self, match, s):
72 """generate a new manifest filtered by the match argument
72 """generate a new manifest filtered by the match argument
73
73
74 This method is for internal use only and mainly exists to provide an
74 This method is for internal use only and mainly exists to provide an
75 object oriented way for other contexts to customize the manifest
75 object oriented way for other contexts to customize the manifest
76 generation.
76 generation.
77 """
77 """
78 return self.manifest().matches(match)
78 return self.manifest().matches(match)
79
79
80 def _matchstatus(self, other, match):
80 def _matchstatus(self, other, match):
81 """return match.always if match is none
81 """return match.always if match is none
82
82
83 This internal method provides a way for child objects to override the
83 This internal method provides a way for child objects to override the
84 match operator.
84 match operator.
85 """
85 """
86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87
87
88 def _buildstatus(self, other, s, match, listignored, listclean,
88 def _buildstatus(self, other, s, match, listignored, listclean,
89 listunknown):
89 listunknown):
90 """build a status with respect to another context"""
90 """build a status with respect to another context"""
91 # Load earliest manifest first for caching reasons. More specifically,
91 # Load earliest manifest first for caching reasons. More specifically,
92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 # 1000 and cache it so that when you read 1001, we just need to apply a
94 # 1000 and cache it so that when you read 1001, we just need to apply a
95 # delta to what's in the cache. So that's one full reconstruction + one
95 # delta to what's in the cache. So that's one full reconstruction + one
96 # delta application.
96 # delta application.
97 if self.rev() is not None and self.rev() < other.rev():
97 if self.rev() is not None and self.rev() < other.rev():
98 self.manifest()
98 self.manifest()
99 mf1 = other._manifestmatches(match, s)
99 mf1 = other._manifestmatches(match, s)
100 mf2 = self._manifestmatches(match, s)
100 mf2 = self._manifestmatches(match, s)
101
101
102 modified, added = [], []
102 modified, added = [], []
103 removed = []
103 removed = []
104 clean = []
104 clean = []
105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 deletedset = set(deleted)
106 deletedset = set(deleted)
107 d = mf1.diff(mf2, clean=listclean)
107 d = mf1.diff(mf2, clean=listclean)
108 for fn, value in d.iteritems():
108 for fn, value in d.iteritems():
109 if fn in deletedset:
109 if fn in deletedset:
110 continue
110 continue
111 if value is None:
111 if value is None:
112 clean.append(fn)
112 clean.append(fn)
113 continue
113 continue
114 (node1, flag1), (node2, flag2) = value
114 (node1, flag1), (node2, flag2) = value
115 if node1 is None:
115 if node1 is None:
116 added.append(fn)
116 added.append(fn)
117 elif node2 is None:
117 elif node2 is None:
118 removed.append(fn)
118 removed.append(fn)
119 elif node2 != _newnode:
119 elif node2 != _newnode:
120 # The file was not a new file in mf2, so an entry
120 # The file was not a new file in mf2, so an entry
121 # from diff is really a difference.
121 # from diff is really a difference.
122 modified.append(fn)
122 modified.append(fn)
123 elif self[fn].cmp(other[fn]):
123 elif self[fn].cmp(other[fn]):
124 # node2 was newnode, but the working file doesn't
124 # node2 was newnode, but the working file doesn't
125 # match the one in mf1.
125 # match the one in mf1.
126 modified.append(fn)
126 modified.append(fn)
127 else:
127 else:
128 clean.append(fn)
128 clean.append(fn)
129
129
130 if removed:
130 if removed:
131 # need to filter files if they are already reported as removed
131 # need to filter files if they are already reported as removed
132 unknown = [fn for fn in unknown if fn not in mf1]
132 unknown = [fn for fn in unknown if fn not in mf1]
133 ignored = [fn for fn in ignored if fn not in mf1]
133 ignored = [fn for fn in ignored if fn not in mf1]
134 # if they're deleted, don't report them as removed
134 # if they're deleted, don't report them as removed
135 removed = [fn for fn in removed if fn not in deletedset]
135 removed = [fn for fn in removed if fn not in deletedset]
136
136
137 return scmutil.status(modified, added, removed, deleted, unknown,
137 return scmutil.status(modified, added, removed, deleted, unknown,
138 ignored, clean)
138 ignored, clean)
139
139
140 @propertycache
140 @propertycache
141 def substate(self):
141 def substate(self):
142 return subrepo.state(self, self._repo.ui)
142 return subrepo.state(self, self._repo.ui)
143
143
144 def subrev(self, subpath):
144 def subrev(self, subpath):
145 return self.substate[subpath][1]
145 return self.substate[subpath][1]
146
146
147 def rev(self):
147 def rev(self):
148 return self._rev
148 return self._rev
149 def node(self):
149 def node(self):
150 return self._node
150 return self._node
151 def hex(self):
151 def hex(self):
152 return hex(self.node())
152 return hex(self.node())
153 def manifest(self):
153 def manifest(self):
154 return self._manifest
154 return self._manifest
155 def repo(self):
155 def repo(self):
156 return self._repo
156 return self._repo
157 def phasestr(self):
157 def phasestr(self):
158 return phases.phasenames[self.phase()]
158 return phases.phasenames[self.phase()]
159 def mutable(self):
159 def mutable(self):
160 return self.phase() > phases.public
160 return self.phase() > phases.public
161
161
162 def getfileset(self, expr):
162 def getfileset(self, expr):
163 return fileset.getfileset(self, expr)
163 return fileset.getfileset(self, expr)
164
164
165 def obsolete(self):
165 def obsolete(self):
166 """True if the changeset is obsolete"""
166 """True if the changeset is obsolete"""
167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
168
168
169 def extinct(self):
169 def extinct(self):
170 """True if the changeset is extinct"""
170 """True if the changeset is extinct"""
171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
172
172
173 def unstable(self):
173 def unstable(self):
174 """True if the changeset is not obsolete but it's ancestor are"""
174 """True if the changeset is not obsolete but it's ancestor are"""
175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
176
176
177 def bumped(self):
177 def bumped(self):
178 """True if the changeset try to be a successor of a public changeset
178 """True if the changeset try to be a successor of a public changeset
179
179
180 Only non-public and non-obsolete changesets may be bumped.
180 Only non-public and non-obsolete changesets may be bumped.
181 """
181 """
182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
183
183
184 def divergent(self):
184 def divergent(self):
185 """Is a successors of a changeset with multiple possible successors set
185 """Is a successors of a changeset with multiple possible successors set
186
186
187 Only non-public and non-obsolete changesets may be divergent.
187 Only non-public and non-obsolete changesets may be divergent.
188 """
188 """
189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
190
190
191 def troubled(self):
191 def troubled(self):
192 """True if the changeset is either unstable, bumped or divergent"""
192 """True if the changeset is either unstable, bumped or divergent"""
193 return self.unstable() or self.bumped() or self.divergent()
193 return self.unstable() or self.bumped() or self.divergent()
194
194
195 def troubles(self):
195 def troubles(self):
196 """return the list of troubles affecting this changesets.
196 """return the list of troubles affecting this changesets.
197
197
198 Troubles are returned as strings. possible values are:
198 Troubles are returned as strings. possible values are:
199 - unstable,
199 - unstable,
200 - bumped,
200 - bumped,
201 - divergent.
201 - divergent.
202 """
202 """
203 troubles = []
203 troubles = []
204 if self.unstable():
204 if self.unstable():
205 troubles.append('unstable')
205 troubles.append('unstable')
206 if self.bumped():
206 if self.bumped():
207 troubles.append('bumped')
207 troubles.append('bumped')
208 if self.divergent():
208 if self.divergent():
209 troubles.append('divergent')
209 troubles.append('divergent')
210 return troubles
210 return troubles
211
211
212 def parents(self):
212 def parents(self):
213 """return contexts for each parent changeset"""
213 """return contexts for each parent changeset"""
214 return self._parents
214 return self._parents
215
215
216 def p1(self):
216 def p1(self):
217 return self._parents[0]
217 return self._parents[0]
218
218
219 def p2(self):
219 def p2(self):
220 if len(self._parents) == 2:
220 if len(self._parents) == 2:
221 return self._parents[1]
221 return self._parents[1]
222 return changectx(self._repo, -1)
222 return changectx(self._repo, -1)
223
223
224 def _fileinfo(self, path):
224 def _fileinfo(self, path):
225 if '_manifest' in self.__dict__:
225 if '_manifest' in self.__dict__:
226 try:
226 try:
227 return self._manifest[path], self._manifest.flags(path)
227 return self._manifest[path], self._manifest.flags(path)
228 except KeyError:
228 except KeyError:
229 raise error.ManifestLookupError(self._node, path,
229 raise error.ManifestLookupError(self._node, path,
230 _('not found in manifest'))
230 _('not found in manifest'))
231 if '_manifestdelta' in self.__dict__ or path in self.files():
231 if '_manifestdelta' in self.__dict__ or path in self.files():
232 if path in self._manifestdelta:
232 if path in self._manifestdelta:
233 return (self._manifestdelta[path],
233 return (self._manifestdelta[path],
234 self._manifestdelta.flags(path))
234 self._manifestdelta.flags(path))
235 node, flag = self._repo.manifest.find(self._changeset[0], path)
235 node, flag = self._repo.manifest.find(self._changeset[0], path)
236 if not node:
236 if not node:
237 raise error.ManifestLookupError(self._node, path,
237 raise error.ManifestLookupError(self._node, path,
238 _('not found in manifest'))
238 _('not found in manifest'))
239
239
240 return node, flag
240 return node, flag
241
241
242 def filenode(self, path):
242 def filenode(self, path):
243 return self._fileinfo(path)[0]
243 return self._fileinfo(path)[0]
244
244
245 def flags(self, path):
245 def flags(self, path):
246 try:
246 try:
247 return self._fileinfo(path)[1]
247 return self._fileinfo(path)[1]
248 except error.LookupError:
248 except error.LookupError:
249 return ''
249 return ''
250
250
251 def sub(self, path):
251 def sub(self, path):
252 return subrepo.subrepo(self, path)
252 return subrepo.subrepo(self, path)
253
253
254 def nullsub(self, path, pctx):
255 return subrepo.nullsubrepo(self, path, pctx)
256
254 def match(self, pats=[], include=None, exclude=None, default='glob',
257 def match(self, pats=[], include=None, exclude=None, default='glob',
255 listsubrepos=False):
258 listsubrepos=False):
256 r = self._repo
259 r = self._repo
257 return matchmod.match(r.root, r.getcwd(), pats,
260 return matchmod.match(r.root, r.getcwd(), pats,
258 include, exclude, default,
261 include, exclude, default,
259 auditor=r.auditor, ctx=self,
262 auditor=r.auditor, ctx=self,
260 listsubrepos=listsubrepos)
263 listsubrepos=listsubrepos)
261
264
262 def diff(self, ctx2=None, match=None, **opts):
265 def diff(self, ctx2=None, match=None, **opts):
263 """Returns a diff generator for the given contexts and matcher"""
266 """Returns a diff generator for the given contexts and matcher"""
264 if ctx2 is None:
267 if ctx2 is None:
265 ctx2 = self.p1()
268 ctx2 = self.p1()
266 if ctx2 is not None:
269 if ctx2 is not None:
267 ctx2 = self._repo[ctx2]
270 ctx2 = self._repo[ctx2]
268 diffopts = patch.diffopts(self._repo.ui, opts)
271 diffopts = patch.diffopts(self._repo.ui, opts)
269 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
272 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
270
273
271 def dirs(self):
274 def dirs(self):
272 return self._manifest.dirs()
275 return self._manifest.dirs()
273
276
274 def hasdir(self, dir):
277 def hasdir(self, dir):
275 return self._manifest.hasdir(dir)
278 return self._manifest.hasdir(dir)
276
279
277 def dirty(self, missing=False, merge=True, branch=True):
280 def dirty(self, missing=False, merge=True, branch=True):
278 return False
281 return False
279
282
280 def status(self, other=None, match=None, listignored=False,
283 def status(self, other=None, match=None, listignored=False,
281 listclean=False, listunknown=False, listsubrepos=False):
284 listclean=False, listunknown=False, listsubrepos=False):
282 """return status of files between two nodes or node and working
285 """return status of files between two nodes or node and working
283 directory.
286 directory.
284
287
285 If other is None, compare this node with working directory.
288 If other is None, compare this node with working directory.
286
289
287 returns (modified, added, removed, deleted, unknown, ignored, clean)
290 returns (modified, added, removed, deleted, unknown, ignored, clean)
288 """
291 """
289
292
290 ctx1 = self
293 ctx1 = self
291 ctx2 = self._repo[other]
294 ctx2 = self._repo[other]
292
295
293 # This next code block is, admittedly, fragile logic that tests for
296 # This next code block is, admittedly, fragile logic that tests for
294 # reversing the contexts and wouldn't need to exist if it weren't for
297 # reversing the contexts and wouldn't need to exist if it weren't for
295 # the fast (and common) code path of comparing the working directory
298 # the fast (and common) code path of comparing the working directory
296 # with its first parent.
299 # with its first parent.
297 #
300 #
298 # What we're aiming for here is the ability to call:
301 # What we're aiming for here is the ability to call:
299 #
302 #
300 # workingctx.status(parentctx)
303 # workingctx.status(parentctx)
301 #
304 #
302 # If we always built the manifest for each context and compared those,
305 # If we always built the manifest for each context and compared those,
303 # then we'd be done. But the special case of the above call means we
306 # then we'd be done. But the special case of the above call means we
304 # just copy the manifest of the parent.
307 # just copy the manifest of the parent.
305 reversed = False
308 reversed = False
306 if (not isinstance(ctx1, changectx)
309 if (not isinstance(ctx1, changectx)
307 and isinstance(ctx2, changectx)):
310 and isinstance(ctx2, changectx)):
308 reversed = True
311 reversed = True
309 ctx1, ctx2 = ctx2, ctx1
312 ctx1, ctx2 = ctx2, ctx1
310
313
311 match = ctx2._matchstatus(ctx1, match)
314 match = ctx2._matchstatus(ctx1, match)
312 r = scmutil.status([], [], [], [], [], [], [])
315 r = scmutil.status([], [], [], [], [], [], [])
313 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
316 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
314 listunknown)
317 listunknown)
315
318
316 if reversed:
319 if reversed:
317 # Reverse added and removed. Clear deleted, unknown and ignored as
320 # Reverse added and removed. Clear deleted, unknown and ignored as
318 # these make no sense to reverse.
321 # these make no sense to reverse.
319 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
322 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
320 r.clean)
323 r.clean)
321
324
322 if listsubrepos:
325 if listsubrepos:
323 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
326 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
324 rev2 = ctx2.subrev(subpath)
327 rev2 = ctx2.subrev(subpath)
325 try:
328 try:
326 submatch = matchmod.narrowmatcher(subpath, match)
329 submatch = matchmod.narrowmatcher(subpath, match)
327 s = sub.status(rev2, match=submatch, ignored=listignored,
330 s = sub.status(rev2, match=submatch, ignored=listignored,
328 clean=listclean, unknown=listunknown,
331 clean=listclean, unknown=listunknown,
329 listsubrepos=True)
332 listsubrepos=True)
330 for rfiles, sfiles in zip(r, s):
333 for rfiles, sfiles in zip(r, s):
331 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
334 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
332 except error.LookupError:
335 except error.LookupError:
333 self._repo.ui.status(_("skipping missing "
336 self._repo.ui.status(_("skipping missing "
334 "subrepository: %s\n") % subpath)
337 "subrepository: %s\n") % subpath)
335
338
336 for l in r:
339 for l in r:
337 l.sort()
340 l.sort()
338
341
339 return r
342 return r
340
343
341
344
342 def makememctx(repo, parents, text, user, date, branch, files, store,
345 def makememctx(repo, parents, text, user, date, branch, files, store,
343 editor=None, extra=None):
346 editor=None, extra=None):
344 def getfilectx(repo, memctx, path):
347 def getfilectx(repo, memctx, path):
345 data, mode, copied = store.getfile(path)
348 data, mode, copied = store.getfile(path)
346 if data is None:
349 if data is None:
347 return None
350 return None
348 islink, isexec = mode
351 islink, isexec = mode
349 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
352 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
350 copied=copied, memctx=memctx)
353 copied=copied, memctx=memctx)
351 if extra is None:
354 if extra is None:
352 extra = {}
355 extra = {}
353 if branch:
356 if branch:
354 extra['branch'] = encoding.fromlocal(branch)
357 extra['branch'] = encoding.fromlocal(branch)
355 ctx = memctx(repo, parents, text, files, getfilectx, user,
358 ctx = memctx(repo, parents, text, files, getfilectx, user,
356 date, extra, editor)
359 date, extra, editor)
357 return ctx
360 return ctx
358
361
359 class changectx(basectx):
362 class changectx(basectx):
360 """A changecontext object makes access to data related to a particular
363 """A changecontext object makes access to data related to a particular
361 changeset convenient. It represents a read-only context already present in
364 changeset convenient. It represents a read-only context already present in
362 the repo."""
365 the repo."""
363 def __init__(self, repo, changeid=''):
366 def __init__(self, repo, changeid=''):
364 """changeid is a revision number, node, or tag"""
367 """changeid is a revision number, node, or tag"""
365
368
366 # since basectx.__new__ already took care of copying the object, we
369 # since basectx.__new__ already took care of copying the object, we
367 # don't need to do anything in __init__, so we just exit here
370 # don't need to do anything in __init__, so we just exit here
368 if isinstance(changeid, basectx):
371 if isinstance(changeid, basectx):
369 return
372 return
370
373
371 if changeid == '':
374 if changeid == '':
372 changeid = '.'
375 changeid = '.'
373 self._repo = repo
376 self._repo = repo
374
377
375 try:
378 try:
376 if isinstance(changeid, int):
379 if isinstance(changeid, int):
377 self._node = repo.changelog.node(changeid)
380 self._node = repo.changelog.node(changeid)
378 self._rev = changeid
381 self._rev = changeid
379 return
382 return
380 if isinstance(changeid, long):
383 if isinstance(changeid, long):
381 changeid = str(changeid)
384 changeid = str(changeid)
382 if changeid == 'null':
385 if changeid == 'null':
383 self._node = nullid
386 self._node = nullid
384 self._rev = nullrev
387 self._rev = nullrev
385 return
388 return
386 if changeid == 'tip':
389 if changeid == 'tip':
387 self._node = repo.changelog.tip()
390 self._node = repo.changelog.tip()
388 self._rev = repo.changelog.rev(self._node)
391 self._rev = repo.changelog.rev(self._node)
389 return
392 return
390 if changeid == '.' or changeid == repo.dirstate.p1():
393 if changeid == '.' or changeid == repo.dirstate.p1():
391 # this is a hack to delay/avoid loading obsmarkers
394 # this is a hack to delay/avoid loading obsmarkers
392 # when we know that '.' won't be hidden
395 # when we know that '.' won't be hidden
393 self._node = repo.dirstate.p1()
396 self._node = repo.dirstate.p1()
394 self._rev = repo.unfiltered().changelog.rev(self._node)
397 self._rev = repo.unfiltered().changelog.rev(self._node)
395 return
398 return
396 if len(changeid) == 20:
399 if len(changeid) == 20:
397 try:
400 try:
398 self._node = changeid
401 self._node = changeid
399 self._rev = repo.changelog.rev(changeid)
402 self._rev = repo.changelog.rev(changeid)
400 return
403 return
401 except error.FilteredRepoLookupError:
404 except error.FilteredRepoLookupError:
402 raise
405 raise
403 except LookupError:
406 except LookupError:
404 pass
407 pass
405
408
406 try:
409 try:
407 r = int(changeid)
410 r = int(changeid)
408 if str(r) != changeid:
411 if str(r) != changeid:
409 raise ValueError
412 raise ValueError
410 l = len(repo.changelog)
413 l = len(repo.changelog)
411 if r < 0:
414 if r < 0:
412 r += l
415 r += l
413 if r < 0 or r >= l:
416 if r < 0 or r >= l:
414 raise ValueError
417 raise ValueError
415 self._rev = r
418 self._rev = r
416 self._node = repo.changelog.node(r)
419 self._node = repo.changelog.node(r)
417 return
420 return
418 except error.FilteredIndexError:
421 except error.FilteredIndexError:
419 raise
422 raise
420 except (ValueError, OverflowError, IndexError):
423 except (ValueError, OverflowError, IndexError):
421 pass
424 pass
422
425
423 if len(changeid) == 40:
426 if len(changeid) == 40:
424 try:
427 try:
425 self._node = bin(changeid)
428 self._node = bin(changeid)
426 self._rev = repo.changelog.rev(self._node)
429 self._rev = repo.changelog.rev(self._node)
427 return
430 return
428 except error.FilteredLookupError:
431 except error.FilteredLookupError:
429 raise
432 raise
430 except (TypeError, LookupError):
433 except (TypeError, LookupError):
431 pass
434 pass
432
435
433 # lookup bookmarks through the name interface
436 # lookup bookmarks through the name interface
434 try:
437 try:
435 self._node = repo.names.singlenode(repo, changeid)
438 self._node = repo.names.singlenode(repo, changeid)
436 self._rev = repo.changelog.rev(self._node)
439 self._rev = repo.changelog.rev(self._node)
437 return
440 return
438 except KeyError:
441 except KeyError:
439 pass
442 pass
440 except error.FilteredRepoLookupError:
443 except error.FilteredRepoLookupError:
441 raise
444 raise
442 except error.RepoLookupError:
445 except error.RepoLookupError:
443 pass
446 pass
444
447
445 self._node = repo.unfiltered().changelog._partialmatch(changeid)
448 self._node = repo.unfiltered().changelog._partialmatch(changeid)
446 if self._node is not None:
449 if self._node is not None:
447 self._rev = repo.changelog.rev(self._node)
450 self._rev = repo.changelog.rev(self._node)
448 return
451 return
449
452
450 # lookup failed
453 # lookup failed
451 # check if it might have come from damaged dirstate
454 # check if it might have come from damaged dirstate
452 #
455 #
453 # XXX we could avoid the unfiltered if we had a recognizable
456 # XXX we could avoid the unfiltered if we had a recognizable
454 # exception for filtered changeset access
457 # exception for filtered changeset access
455 if changeid in repo.unfiltered().dirstate.parents():
458 if changeid in repo.unfiltered().dirstate.parents():
456 msg = _("working directory has unknown parent '%s'!")
459 msg = _("working directory has unknown parent '%s'!")
457 raise error.Abort(msg % short(changeid))
460 raise error.Abort(msg % short(changeid))
458 try:
461 try:
459 if len(changeid) == 20:
462 if len(changeid) == 20:
460 changeid = hex(changeid)
463 changeid = hex(changeid)
461 except TypeError:
464 except TypeError:
462 pass
465 pass
463 except (error.FilteredIndexError, error.FilteredLookupError,
466 except (error.FilteredIndexError, error.FilteredLookupError,
464 error.FilteredRepoLookupError):
467 error.FilteredRepoLookupError):
465 if repo.filtername.startswith('visible'):
468 if repo.filtername.startswith('visible'):
466 msg = _("hidden revision '%s'") % changeid
469 msg = _("hidden revision '%s'") % changeid
467 hint = _('use --hidden to access hidden revisions')
470 hint = _('use --hidden to access hidden revisions')
468 raise error.FilteredRepoLookupError(msg, hint=hint)
471 raise error.FilteredRepoLookupError(msg, hint=hint)
469 msg = _("filtered revision '%s' (not in '%s' subset)")
472 msg = _("filtered revision '%s' (not in '%s' subset)")
470 msg %= (changeid, repo.filtername)
473 msg %= (changeid, repo.filtername)
471 raise error.FilteredRepoLookupError(msg)
474 raise error.FilteredRepoLookupError(msg)
472 except IndexError:
475 except IndexError:
473 pass
476 pass
474 raise error.RepoLookupError(
477 raise error.RepoLookupError(
475 _("unknown revision '%s'") % changeid)
478 _("unknown revision '%s'") % changeid)
476
479
477 def __hash__(self):
480 def __hash__(self):
478 try:
481 try:
479 return hash(self._rev)
482 return hash(self._rev)
480 except AttributeError:
483 except AttributeError:
481 return id(self)
484 return id(self)
482
485
483 def __nonzero__(self):
486 def __nonzero__(self):
484 return self._rev != nullrev
487 return self._rev != nullrev
485
488
486 @propertycache
489 @propertycache
487 def _changeset(self):
490 def _changeset(self):
488 return self._repo.changelog.read(self.rev())
491 return self._repo.changelog.read(self.rev())
489
492
490 @propertycache
493 @propertycache
491 def _manifest(self):
494 def _manifest(self):
492 return self._repo.manifest.read(self._changeset[0])
495 return self._repo.manifest.read(self._changeset[0])
493
496
494 @propertycache
497 @propertycache
495 def _manifestdelta(self):
498 def _manifestdelta(self):
496 return self._repo.manifest.readdelta(self._changeset[0])
499 return self._repo.manifest.readdelta(self._changeset[0])
497
500
498 @propertycache
501 @propertycache
499 def _parents(self):
502 def _parents(self):
500 p = self._repo.changelog.parentrevs(self._rev)
503 p = self._repo.changelog.parentrevs(self._rev)
501 if p[1] == nullrev:
504 if p[1] == nullrev:
502 p = p[:-1]
505 p = p[:-1]
503 return [changectx(self._repo, x) for x in p]
506 return [changectx(self._repo, x) for x in p]
504
507
505 def changeset(self):
508 def changeset(self):
506 return self._changeset
509 return self._changeset
507 def manifestnode(self):
510 def manifestnode(self):
508 return self._changeset[0]
511 return self._changeset[0]
509
512
510 def user(self):
513 def user(self):
511 return self._changeset[1]
514 return self._changeset[1]
512 def date(self):
515 def date(self):
513 return self._changeset[2]
516 return self._changeset[2]
514 def files(self):
517 def files(self):
515 return self._changeset[3]
518 return self._changeset[3]
516 def description(self):
519 def description(self):
517 return self._changeset[4]
520 return self._changeset[4]
518 def branch(self):
521 def branch(self):
519 return encoding.tolocal(self._changeset[5].get("branch"))
522 return encoding.tolocal(self._changeset[5].get("branch"))
520 def closesbranch(self):
523 def closesbranch(self):
521 return 'close' in self._changeset[5]
524 return 'close' in self._changeset[5]
522 def extra(self):
525 def extra(self):
523 return self._changeset[5]
526 return self._changeset[5]
524 def tags(self):
527 def tags(self):
525 return self._repo.nodetags(self._node)
528 return self._repo.nodetags(self._node)
526 def bookmarks(self):
529 def bookmarks(self):
527 return self._repo.nodebookmarks(self._node)
530 return self._repo.nodebookmarks(self._node)
528 def phase(self):
531 def phase(self):
529 return self._repo._phasecache.phase(self._repo, self._rev)
532 return self._repo._phasecache.phase(self._repo, self._rev)
530 def hidden(self):
533 def hidden(self):
531 return self._rev in repoview.filterrevs(self._repo, 'visible')
534 return self._rev in repoview.filterrevs(self._repo, 'visible')
532
535
533 def children(self):
536 def children(self):
534 """return contexts for each child changeset"""
537 """return contexts for each child changeset"""
535 c = self._repo.changelog.children(self._node)
538 c = self._repo.changelog.children(self._node)
536 return [changectx(self._repo, x) for x in c]
539 return [changectx(self._repo, x) for x in c]
537
540
538 def ancestors(self):
541 def ancestors(self):
539 for a in self._repo.changelog.ancestors([self._rev]):
542 for a in self._repo.changelog.ancestors([self._rev]):
540 yield changectx(self._repo, a)
543 yield changectx(self._repo, a)
541
544
542 def descendants(self):
545 def descendants(self):
543 for d in self._repo.changelog.descendants([self._rev]):
546 for d in self._repo.changelog.descendants([self._rev]):
544 yield changectx(self._repo, d)
547 yield changectx(self._repo, d)
545
548
546 def filectx(self, path, fileid=None, filelog=None):
549 def filectx(self, path, fileid=None, filelog=None):
547 """get a file context from this changeset"""
550 """get a file context from this changeset"""
548 if fileid is None:
551 if fileid is None:
549 fileid = self.filenode(path)
552 fileid = self.filenode(path)
550 return filectx(self._repo, path, fileid=fileid,
553 return filectx(self._repo, path, fileid=fileid,
551 changectx=self, filelog=filelog)
554 changectx=self, filelog=filelog)
552
555
553 def ancestor(self, c2, warn=False):
556 def ancestor(self, c2, warn=False):
554 """return the "best" ancestor context of self and c2
557 """return the "best" ancestor context of self and c2
555
558
556 If there are multiple candidates, it will show a message and check
559 If there are multiple candidates, it will show a message and check
557 merge.preferancestor configuration before falling back to the
560 merge.preferancestor configuration before falling back to the
558 revlog ancestor."""
561 revlog ancestor."""
559 # deal with workingctxs
562 # deal with workingctxs
560 n2 = c2._node
563 n2 = c2._node
561 if n2 is None:
564 if n2 is None:
562 n2 = c2._parents[0]._node
565 n2 = c2._parents[0]._node
563 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
564 if not cahs:
567 if not cahs:
565 anc = nullid
568 anc = nullid
566 elif len(cahs) == 1:
569 elif len(cahs) == 1:
567 anc = cahs[0]
570 anc = cahs[0]
568 else:
571 else:
569 for r in self._repo.ui.configlist('merge', 'preferancestor'):
572 for r in self._repo.ui.configlist('merge', 'preferancestor'):
570 try:
573 try:
571 ctx = changectx(self._repo, r)
574 ctx = changectx(self._repo, r)
572 except error.RepoLookupError:
575 except error.RepoLookupError:
573 continue
576 continue
574 anc = ctx.node()
577 anc = ctx.node()
575 if anc in cahs:
578 if anc in cahs:
576 break
579 break
577 else:
580 else:
578 anc = self._repo.changelog.ancestor(self._node, n2)
581 anc = self._repo.changelog.ancestor(self._node, n2)
579 if warn:
582 if warn:
580 self._repo.ui.status(
583 self._repo.ui.status(
581 (_("note: using %s as ancestor of %s and %s\n") %
584 (_("note: using %s as ancestor of %s and %s\n") %
582 (short(anc), short(self._node), short(n2))) +
585 (short(anc), short(self._node), short(n2))) +
583 ''.join(_(" alternatively, use --config "
586 ''.join(_(" alternatively, use --config "
584 "merge.preferancestor=%s\n") %
587 "merge.preferancestor=%s\n") %
585 short(n) for n in sorted(cahs) if n != anc))
588 short(n) for n in sorted(cahs) if n != anc))
586 return changectx(self._repo, anc)
589 return changectx(self._repo, anc)
587
590
588 def descendant(self, other):
591 def descendant(self, other):
589 """True if other is descendant of this changeset"""
592 """True if other is descendant of this changeset"""
590 return self._repo.changelog.descendant(self._rev, other._rev)
593 return self._repo.changelog.descendant(self._rev, other._rev)
591
594
592 def walk(self, match):
595 def walk(self, match):
593 '''Generates matching file names.'''
596 '''Generates matching file names.'''
594
597
595 # Override match.bad method to have message with nodeid
598 # Override match.bad method to have message with nodeid
596 match = copy.copy(match)
599 match = copy.copy(match)
597 oldbad = match.bad
600 oldbad = match.bad
598 def bad(fn, msg):
601 def bad(fn, msg):
599 # The manifest doesn't know about subrepos, so don't complain about
602 # The manifest doesn't know about subrepos, so don't complain about
600 # paths into valid subrepos.
603 # paths into valid subrepos.
601 if any(fn == s or fn.startswith(s + '/')
604 if any(fn == s or fn.startswith(s + '/')
602 for s in self.substate):
605 for s in self.substate):
603 return
606 return
604 oldbad(fn, _('no such file in rev %s') % self)
607 oldbad(fn, _('no such file in rev %s') % self)
605 match.bad = bad
608 match.bad = bad
606
609
607 return self._manifest.walk(match)
610 return self._manifest.walk(match)
608
611
609 def matches(self, match):
612 def matches(self, match):
610 return self.walk(match)
613 return self.walk(match)
611
614
612 class basefilectx(object):
615 class basefilectx(object):
613 """A filecontext object represents the common logic for its children:
616 """A filecontext object represents the common logic for its children:
614 filectx: read-only access to a filerevision that is already present
617 filectx: read-only access to a filerevision that is already present
615 in the repo,
618 in the repo,
616 workingfilectx: a filecontext that represents files from the working
619 workingfilectx: a filecontext that represents files from the working
617 directory,
620 directory,
618 memfilectx: a filecontext that represents files in-memory."""
621 memfilectx: a filecontext that represents files in-memory."""
619 def __new__(cls, repo, path, *args, **kwargs):
622 def __new__(cls, repo, path, *args, **kwargs):
620 return super(basefilectx, cls).__new__(cls)
623 return super(basefilectx, cls).__new__(cls)
621
624
622 @propertycache
625 @propertycache
623 def _filelog(self):
626 def _filelog(self):
624 return self._repo.file(self._path)
627 return self._repo.file(self._path)
625
628
626 @propertycache
629 @propertycache
627 def _changeid(self):
630 def _changeid(self):
628 if '_changeid' in self.__dict__:
631 if '_changeid' in self.__dict__:
629 return self._changeid
632 return self._changeid
630 elif '_changectx' in self.__dict__:
633 elif '_changectx' in self.__dict__:
631 return self._changectx.rev()
634 return self._changectx.rev()
632 elif '_descendantrev' in self.__dict__:
635 elif '_descendantrev' in self.__dict__:
633 # this file context was created from a revision with a known
636 # this file context was created from a revision with a known
634 # descendant, we can (lazily) correct for linkrev aliases
637 # descendant, we can (lazily) correct for linkrev aliases
635 return self._adjustlinkrev(self._path, self._filelog,
638 return self._adjustlinkrev(self._path, self._filelog,
636 self._filenode, self._descendantrev)
639 self._filenode, self._descendantrev)
637 else:
640 else:
638 return self._filelog.linkrev(self._filerev)
641 return self._filelog.linkrev(self._filerev)
639
642
640 @propertycache
643 @propertycache
641 def _filenode(self):
644 def _filenode(self):
642 if '_fileid' in self.__dict__:
645 if '_fileid' in self.__dict__:
643 return self._filelog.lookup(self._fileid)
646 return self._filelog.lookup(self._fileid)
644 else:
647 else:
645 return self._changectx.filenode(self._path)
648 return self._changectx.filenode(self._path)
646
649
647 @propertycache
650 @propertycache
648 def _filerev(self):
651 def _filerev(self):
649 return self._filelog.rev(self._filenode)
652 return self._filelog.rev(self._filenode)
650
653
651 @propertycache
654 @propertycache
652 def _repopath(self):
655 def _repopath(self):
653 return self._path
656 return self._path
654
657
655 def __nonzero__(self):
658 def __nonzero__(self):
656 try:
659 try:
657 self._filenode
660 self._filenode
658 return True
661 return True
659 except error.LookupError:
662 except error.LookupError:
660 # file is missing
663 # file is missing
661 return False
664 return False
662
665
663 def __str__(self):
666 def __str__(self):
664 return "%s@%s" % (self.path(), self._changectx)
667 return "%s@%s" % (self.path(), self._changectx)
665
668
666 def __repr__(self):
669 def __repr__(self):
667 return "<%s %s>" % (type(self).__name__, str(self))
670 return "<%s %s>" % (type(self).__name__, str(self))
668
671
669 def __hash__(self):
672 def __hash__(self):
670 try:
673 try:
671 return hash((self._path, self._filenode))
674 return hash((self._path, self._filenode))
672 except AttributeError:
675 except AttributeError:
673 return id(self)
676 return id(self)
674
677
675 def __eq__(self, other):
678 def __eq__(self, other):
676 try:
679 try:
677 return (type(self) == type(other) and self._path == other._path
680 return (type(self) == type(other) and self._path == other._path
678 and self._filenode == other._filenode)
681 and self._filenode == other._filenode)
679 except AttributeError:
682 except AttributeError:
680 return False
683 return False
681
684
682 def __ne__(self, other):
685 def __ne__(self, other):
683 return not (self == other)
686 return not (self == other)
684
687
685 def filerev(self):
688 def filerev(self):
686 return self._filerev
689 return self._filerev
687 def filenode(self):
690 def filenode(self):
688 return self._filenode
691 return self._filenode
689 def flags(self):
692 def flags(self):
690 return self._changectx.flags(self._path)
693 return self._changectx.flags(self._path)
691 def filelog(self):
694 def filelog(self):
692 return self._filelog
695 return self._filelog
693 def rev(self):
696 def rev(self):
694 return self._changeid
697 return self._changeid
695 def linkrev(self):
698 def linkrev(self):
696 return self._filelog.linkrev(self._filerev)
699 return self._filelog.linkrev(self._filerev)
697 def node(self):
700 def node(self):
698 return self._changectx.node()
701 return self._changectx.node()
699 def hex(self):
702 def hex(self):
700 return self._changectx.hex()
703 return self._changectx.hex()
701 def user(self):
704 def user(self):
702 return self._changectx.user()
705 return self._changectx.user()
703 def date(self):
706 def date(self):
704 return self._changectx.date()
707 return self._changectx.date()
705 def files(self):
708 def files(self):
706 return self._changectx.files()
709 return self._changectx.files()
707 def description(self):
710 def description(self):
708 return self._changectx.description()
711 return self._changectx.description()
709 def branch(self):
712 def branch(self):
710 return self._changectx.branch()
713 return self._changectx.branch()
711 def extra(self):
714 def extra(self):
712 return self._changectx.extra()
715 return self._changectx.extra()
713 def phase(self):
716 def phase(self):
714 return self._changectx.phase()
717 return self._changectx.phase()
715 def phasestr(self):
718 def phasestr(self):
716 return self._changectx.phasestr()
719 return self._changectx.phasestr()
717 def manifest(self):
720 def manifest(self):
718 return self._changectx.manifest()
721 return self._changectx.manifest()
719 def changectx(self):
722 def changectx(self):
720 return self._changectx
723 return self._changectx
721 def repo(self):
724 def repo(self):
722 return self._repo
725 return self._repo
723
726
724 def path(self):
727 def path(self):
725 return self._path
728 return self._path
726
729
727 def isbinary(self):
730 def isbinary(self):
728 try:
731 try:
729 return util.binary(self.data())
732 return util.binary(self.data())
730 except IOError:
733 except IOError:
731 return False
734 return False
732 def isexec(self):
735 def isexec(self):
733 return 'x' in self.flags()
736 return 'x' in self.flags()
734 def islink(self):
737 def islink(self):
735 return 'l' in self.flags()
738 return 'l' in self.flags()
736
739
737 def cmp(self, fctx):
740 def cmp(self, fctx):
738 """compare with other file context
741 """compare with other file context
739
742
740 returns True if different than fctx.
743 returns True if different than fctx.
741 """
744 """
742 if (fctx._filerev is None
745 if (fctx._filerev is None
743 and (self._repo._encodefilterpats
746 and (self._repo._encodefilterpats
744 # if file data starts with '\1\n', empty metadata block is
747 # if file data starts with '\1\n', empty metadata block is
745 # prepended, which adds 4 bytes to filelog.size().
748 # prepended, which adds 4 bytes to filelog.size().
746 or self.size() - 4 == fctx.size())
749 or self.size() - 4 == fctx.size())
747 or self.size() == fctx.size()):
750 or self.size() == fctx.size()):
748 return self._filelog.cmp(self._filenode, fctx.data())
751 return self._filelog.cmp(self._filenode, fctx.data())
749
752
750 return True
753 return True
751
754
752 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
755 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
753 """return the first ancestor of <srcrev> introducing <fnode>
756 """return the first ancestor of <srcrev> introducing <fnode>
754
757
755 If the linkrev of the file revision does not point to an ancestor of
758 If the linkrev of the file revision does not point to an ancestor of
756 srcrev, we'll walk down the ancestors until we find one introducing
759 srcrev, we'll walk down the ancestors until we find one introducing
757 this file revision.
760 this file revision.
758
761
759 :repo: a localrepository object (used to access changelog and manifest)
762 :repo: a localrepository object (used to access changelog and manifest)
760 :path: the file path
763 :path: the file path
761 :fnode: the nodeid of the file revision
764 :fnode: the nodeid of the file revision
762 :filelog: the filelog of this path
765 :filelog: the filelog of this path
763 :srcrev: the changeset revision we search ancestors from
766 :srcrev: the changeset revision we search ancestors from
764 :inclusive: if true, the src revision will also be checked
767 :inclusive: if true, the src revision will also be checked
765 """
768 """
766 repo = self._repo
769 repo = self._repo
767 cl = repo.unfiltered().changelog
770 cl = repo.unfiltered().changelog
768 ma = repo.manifest
771 ma = repo.manifest
769 # fetch the linkrev
772 # fetch the linkrev
770 fr = filelog.rev(fnode)
773 fr = filelog.rev(fnode)
771 lkr = filelog.linkrev(fr)
774 lkr = filelog.linkrev(fr)
772 # hack to reuse ancestor computation when searching for renames
775 # hack to reuse ancestor computation when searching for renames
773 memberanc = getattr(self, '_ancestrycontext', None)
776 memberanc = getattr(self, '_ancestrycontext', None)
774 iteranc = None
777 iteranc = None
775 if srcrev is None:
778 if srcrev is None:
776 # wctx case, used by workingfilectx during mergecopy
779 # wctx case, used by workingfilectx during mergecopy
777 revs = [p.rev() for p in self._repo[None].parents()]
780 revs = [p.rev() for p in self._repo[None].parents()]
778 inclusive = True # we skipped the real (revless) source
781 inclusive = True # we skipped the real (revless) source
779 else:
782 else:
780 revs = [srcrev]
783 revs = [srcrev]
781 if memberanc is None:
784 if memberanc is None:
782 memberanc = iteranc = cl.ancestors(revs, lkr,
785 memberanc = iteranc = cl.ancestors(revs, lkr,
783 inclusive=inclusive)
786 inclusive=inclusive)
784 # check if this linkrev is an ancestor of srcrev
787 # check if this linkrev is an ancestor of srcrev
785 if lkr not in memberanc:
788 if lkr not in memberanc:
786 if iteranc is None:
789 if iteranc is None:
787 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
790 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
788 for a in iteranc:
791 for a in iteranc:
789 ac = cl.read(a) # get changeset data (we avoid object creation)
792 ac = cl.read(a) # get changeset data (we avoid object creation)
790 if path in ac[3]: # checking the 'files' field.
793 if path in ac[3]: # checking the 'files' field.
791 # The file has been touched, check if the content is
794 # The file has been touched, check if the content is
792 # similar to the one we search for.
795 # similar to the one we search for.
793 if fnode == ma.readfast(ac[0]).get(path):
796 if fnode == ma.readfast(ac[0]).get(path):
794 return a
797 return a
795 # In theory, we should never get out of that loop without a result.
798 # In theory, we should never get out of that loop without a result.
796 # But if manifest uses a buggy file revision (not children of the
799 # But if manifest uses a buggy file revision (not children of the
797 # one it replaces) we could. Such a buggy situation will likely
800 # one it replaces) we could. Such a buggy situation will likely
798 # result is crash somewhere else at to some point.
801 # result is crash somewhere else at to some point.
799 return lkr
802 return lkr
800
803
801 def introrev(self):
804 def introrev(self):
802 """return the rev of the changeset which introduced this file revision
805 """return the rev of the changeset which introduced this file revision
803
806
804 This method is different from linkrev because it take into account the
807 This method is different from linkrev because it take into account the
805 changeset the filectx was created from. It ensures the returned
808 changeset the filectx was created from. It ensures the returned
806 revision is one of its ancestors. This prevents bugs from
809 revision is one of its ancestors. This prevents bugs from
807 'linkrev-shadowing' when a file revision is used by multiple
810 'linkrev-shadowing' when a file revision is used by multiple
808 changesets.
811 changesets.
809 """
812 """
810 lkr = self.linkrev()
813 lkr = self.linkrev()
811 attrs = vars(self)
814 attrs = vars(self)
812 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
815 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
813 if noctx or self.rev() == lkr:
816 if noctx or self.rev() == lkr:
814 return self.linkrev()
817 return self.linkrev()
815 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
818 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
816 self.rev(), inclusive=True)
819 self.rev(), inclusive=True)
817
820
818 def _parentfilectx(self, path, fileid, filelog):
821 def _parentfilectx(self, path, fileid, filelog):
819 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
822 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
820 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
823 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
821 if '_changeid' in vars(self) or '_changectx' in vars(self):
824 if '_changeid' in vars(self) or '_changectx' in vars(self):
822 # If self is associated with a changeset (probably explicitly
825 # If self is associated with a changeset (probably explicitly
823 # fed), ensure the created filectx is associated with a
826 # fed), ensure the created filectx is associated with a
824 # changeset that is an ancestor of self.changectx.
827 # changeset that is an ancestor of self.changectx.
825 # This lets us later use _adjustlinkrev to get a correct link.
828 # This lets us later use _adjustlinkrev to get a correct link.
826 fctx._descendantrev = self.rev()
829 fctx._descendantrev = self.rev()
827 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
830 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
828 elif '_descendantrev' in vars(self):
831 elif '_descendantrev' in vars(self):
829 # Otherwise propagate _descendantrev if we have one associated.
832 # Otherwise propagate _descendantrev if we have one associated.
830 fctx._descendantrev = self._descendantrev
833 fctx._descendantrev = self._descendantrev
831 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
834 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
832 return fctx
835 return fctx
833
836
834 def parents(self):
837 def parents(self):
835 _path = self._path
838 _path = self._path
836 fl = self._filelog
839 fl = self._filelog
837 parents = self._filelog.parents(self._filenode)
840 parents = self._filelog.parents(self._filenode)
838 pl = [(_path, node, fl) for node in parents if node != nullid]
841 pl = [(_path, node, fl) for node in parents if node != nullid]
839
842
840 r = fl.renamed(self._filenode)
843 r = fl.renamed(self._filenode)
841 if r:
844 if r:
842 # - In the simple rename case, both parent are nullid, pl is empty.
845 # - In the simple rename case, both parent are nullid, pl is empty.
843 # - In case of merge, only one of the parent is null id and should
846 # - In case of merge, only one of the parent is null id and should
844 # be replaced with the rename information. This parent is -always-
847 # be replaced with the rename information. This parent is -always-
845 # the first one.
848 # the first one.
846 #
849 #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

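    # Illustrative annotate() usage (a hedged sketch, not part of the module;
    # it assumes a `repo` localrepo object obtained elsewhere):
    #
    #   fctx = repo['tip']['path/to/file']
    #   for introctx, line in fctx.annotate(follow=True):
    #       print introctx.rev(), line,
    #
    # With linenumber=True, each key is an (introctx, lineno) pair instead,
    # lineno being the line's position when it first appeared.
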
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

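    # Construction sketch (illustrative only; callers usually reach a filectx
    # through a changectx, e.g. repo[rev][path], rather than instantiating it
    # directly):
    #
    #   fctx = filectx(repo, 'README', changeid='tip')   # by changeset
    #   fctx = filectx(repo, 'README', fileid=0)         # by filelog revision
    #
    # At least one of changeid, fileid or changectx must be given, as the
    # assertion in __init__ above enforces.
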
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

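    # Worked example of the merge-flag heuristic above (hypothetical values):
    # if p1 marks f executable ('x') while p2 and the ancestor both carry no
    # flag (''), then fl2 == fla, so p1's change wins and func(f) returns 'x'.
    # Only when all three values disagree pairwise does it punt and return ''.
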
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

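    # For instance, a file modified against p1 typically ends up in this
    # manifest as <p1 filenode> + 'm', and a newly added (non-copied) file as
    # <nullid> + 'a'; the extra letter keeps these 21-byte entries distinct
    # from any real 20-byte stored nodeid.
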
    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

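    # Typical access sketch (illustrative only; assumes an existing `repo`
    # object). The working directory context is usually obtained as
    # repo[None] rather than constructed directly:
    #
    #   wctx = repo[None]
    #   if wctx.dirty(missing=True):
    #       raise util.Abort(_('uncommitted changes'))
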
    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def match(self, pats=[], include=None, exclude=None, default='glob',
              listsubrepos=False):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        if not util.checkcase(r.root):
            return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                           exclude, default, r.auditor, self,
                                           listsubrepos=listsubrepos)
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

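    # Minimal usage sketch (illustrative only; error handling and copy
    # information are omitted, and `repo` is assumed to exist already):
    #
    #   def getfilectx(repo, memctx, path):
    #       return memfilectx(repo, path, 'content of %s\n' % path)
    #
    #   ctx = memctx(repo, [repo['tip'].node(), None], 'commit message',
    #                ['a.txt'], getfilectx, user='someone <some@example.com>')
    #   node = ctx.commit()
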
1763 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1766 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1764 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1767 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1765 # this field to determine what to do in filectxfn.
1768 # this field to determine what to do in filectxfn.
1766 _returnnoneformissingfiles = True
1769 _returnnoneformissingfiles = True
1767
1770
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if filectxfn is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from this in-memory commit

        Returns None if the file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used to check for the existence
        # of the 2nd parent, because "memctx._parents" is explicitly
        # initialized with a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to the repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in
        the revision being committed, or None."""
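        # A hypothetical sketch of how a rename is expressed from inside a
        # filectxfn: return None (or raise IOError on Mercurial <= 3.1) for
        # the old path, and build the new path with "copied" naming the
        # source. The paths and "data" below are illustrative only:
        #
        #   memfilectx(repo, 'docs/new-name.txt', data,
        #              copied='docs/old-name.txt', memctx=memctx)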
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data