with: use context manager for wlock in workingctx.forget
Bryan O'Sullivan
r27810:8c81975f default
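
The method this commit changes, workingctx.forget, sits later in the file and is not visible in the hunk below. As a rough, self-contained sketch of the pattern named in the commit message (an explicit wlock acquire with try/finally release replaced by a "with" statement), using toy Repo and _Lock stand-ins rather than Mercurial's real localrepository API:

import threading

class _Lock(object):
    '''Toy stand-in for Mercurial's lock object: acquired on creation,
    released explicitly or on leaving a "with" block.'''
    def __init__(self):
        self._lock = threading.RLock()
        self._lock.acquire()

    def release(self):
        self._lock.release()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

class Repo(object):
    def wlock(self):
        # Acquire and return the working-directory lock (toy version).
        return _Lock()

def forget_old(repo, files):
    # Pre-change shape: explicit release in a finally block.
    wlock = repo.wlock()
    try:
        return list(files)  # placeholder for the real dirstate work
    finally:
        wlock.release()

def forget_new(repo, files):
    # Post-change shape: the lock is released automatically, even if the
    # body raises, which is what the commit message describes.
    with repo.wlock():
        return list(files)  # placeholder for the real dirstate work

Both helpers behave the same; the "with" form simply removes the try/finally boilerplate.
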
@@ -1,1968 +1,1965 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand-in for new files in some uses of
42 # Phony node value to stand-in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
49 class basectx(object):
49 class basectx(object):
50 """A basectx object represents the common logic for its children:
50 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
51 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
52 workingctx: a context that represents the working directory and can
53 be committed,
53 be committed,
54 memctx: a context that represents changes in-memory and can also
54 memctx: a context that represents changes in-memory and can also
55 be committed."""
55 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
57 if isinstance(changeid, basectx):
58 return changeid
58 return changeid
59
59
60 o = super(basectx, cls).__new__(cls)
60 o = super(basectx, cls).__new__(cls)
61
61
62 o._repo = repo
62 o._repo = repo
63 o._rev = nullrev
63 o._rev = nullrev
64 o._node = nullid
64 o._node = nullid
65
65
66 return o
66 return o
67
67
68 def __str__(self):
68 def __str__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 def __int__(self):
71 def __int__(self):
72 return self.rev()
72 return self.rev()
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _manifestmatches(self, match, s):
95 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
96 """generate a new manifest filtered by the match argument
97
97
98 This method is for internal use only and mainly exists to provide an
98 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
99 object oriented way for other contexts to customize the manifest
100 generation.
100 generation.
101 """
101 """
102 return self.manifest().matches(match)
102 return self.manifest().matches(match)
103
103
104 def _matchstatus(self, other, match):
104 def _matchstatus(self, other, match):
105 """return match.always if match is none
105 """return match.always if match is none
106
106
107 This internal method provides a way for child objects to override the
107 This internal method provides a way for child objects to override the
108 match operator.
108 match operator.
109 """
109 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
111
112 def _buildstatus(self, other, s, match, listignored, listclean,
112 def _buildstatus(self, other, s, match, listignored, listclean,
113 listunknown):
113 listunknown):
114 """build a status with respect to another context"""
114 """build a status with respect to another context"""
115 # Load earliest manifest first for caching reasons. More specifically,
115 # Load earliest manifest first for caching reasons. More specifically,
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # 1000 and cache it so that when you read 1001, we just need to apply a
118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # delta to what's in the cache. So that's one full reconstruction + one
119 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta application.
120 # delta application.
121 if self.rev() is not None and self.rev() < other.rev():
121 if self.rev() is not None and self.rev() < other.rev():
122 self.manifest()
122 self.manifest()
123 mf1 = other._manifestmatches(match, s)
123 mf1 = other._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
125
125
126 modified, added = [], []
126 modified, added = [], []
127 removed = []
127 removed = []
128 clean = []
128 clean = []
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deletedset = set(deleted)
130 deletedset = set(deleted)
131 d = mf1.diff(mf2, clean=listclean)
131 d = mf1.diff(mf2, clean=listclean)
132 for fn, value in d.iteritems():
132 for fn, value in d.iteritems():
133 if fn in deletedset:
133 if fn in deletedset:
134 continue
134 continue
135 if value is None:
135 if value is None:
136 clean.append(fn)
136 clean.append(fn)
137 continue
137 continue
138 (node1, flag1), (node2, flag2) = value
138 (node1, flag1), (node2, flag2) = value
139 if node1 is None:
139 if node1 is None:
140 added.append(fn)
140 added.append(fn)
141 elif node2 is None:
141 elif node2 is None:
142 removed.append(fn)
142 removed.append(fn)
143 elif flag1 != flag2:
143 elif flag1 != flag2:
144 modified.append(fn)
144 modified.append(fn)
145 elif self.rev() is not None:
145 elif self.rev() is not None:
146 # When comparing files between two commits, we save time by
146 # When comparing files between two commits, we save time by
147 # not comparing the file contents when the nodeids differ.
147 # not comparing the file contents when the nodeids differ.
148 # Note that this means we incorrectly report a reverted change
148 # Note that this means we incorrectly report a reverted change
149 # to a file as a modification.
149 # to a file as a modification.
150 modified.append(fn)
150 modified.append(fn)
151 elif self[fn].cmp(other[fn]):
151 elif self[fn].cmp(other[fn]):
152 modified.append(fn)
152 modified.append(fn)
153 else:
153 else:
154 clean.append(fn)
154 clean.append(fn)
155
155
156 if removed:
156 if removed:
157 # need to filter files if they are already reported as removed
157 # need to filter files if they are already reported as removed
158 unknown = [fn for fn in unknown if fn not in mf1]
158 unknown = [fn for fn in unknown if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
160 # if they're deleted, don't report them as removed
160 # if they're deleted, don't report them as removed
161 removed = [fn for fn in removed if fn not in deletedset]
161 removed = [fn for fn in removed if fn not in deletedset]
162
162
163 return scmutil.status(modified, added, removed, deleted, unknown,
163 return scmutil.status(modified, added, removed, deleted, unknown,
164 ignored, clean)
164 ignored, clean)
165
165
166 @propertycache
166 @propertycache
167 def substate(self):
167 def substate(self):
168 return subrepo.state(self, self._repo.ui)
168 return subrepo.state(self, self._repo.ui)
169
169
170 def subrev(self, subpath):
170 def subrev(self, subpath):
171 return self.substate[subpath][1]
171 return self.substate[subpath][1]
172
172
173 def rev(self):
173 def rev(self):
174 return self._rev
174 return self._rev
175 def node(self):
175 def node(self):
176 return self._node
176 return self._node
177 def hex(self):
177 def hex(self):
178 return hex(self.node())
178 return hex(self.node())
179 def manifest(self):
179 def manifest(self):
180 return self._manifest
180 return self._manifest
181 def repo(self):
181 def repo(self):
182 return self._repo
182 return self._repo
183 def phasestr(self):
183 def phasestr(self):
184 return phases.phasenames[self.phase()]
184 return phases.phasenames[self.phase()]
185 def mutable(self):
185 def mutable(self):
186 return self.phase() > phases.public
186 return self.phase() > phases.public
187
187
188 def getfileset(self, expr):
188 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
189 return fileset.getfileset(self, expr)
190
190
191 def obsolete(self):
191 def obsolete(self):
192 """True if the changeset is obsolete"""
192 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
194
195 def extinct(self):
195 def extinct(self):
196 """True if the changeset is extinct"""
196 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
198
199 def unstable(self):
199 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
200 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
202
203 def bumped(self):
203 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
204 """True if the changeset try to be a successor of a public changeset
205
205
206 Only non-public and non-obsolete changesets may be bumped.
206 Only non-public and non-obsolete changesets may be bumped.
207 """
207 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
209
210 def divergent(self):
210 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
211 """Is a successors of a changeset with multiple possible successors set
212
212
213 Only non-public and non-obsolete changesets may be divergent.
213 Only non-public and non-obsolete changesets may be divergent.
214 """
214 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
216
217 def troubled(self):
217 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
218 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
219 return self.unstable() or self.bumped() or self.divergent()
220
220
221 def troubles(self):
221 def troubles(self):
222 """return the list of troubles affecting this changesets.
222 """return the list of troubles affecting this changesets.
223
223
224 Troubles are returned as strings. possible values are:
224 Troubles are returned as strings. possible values are:
225 - unstable,
225 - unstable,
226 - bumped,
226 - bumped,
227 - divergent.
227 - divergent.
228 """
228 """
229 troubles = []
229 troubles = []
230 if self.unstable():
230 if self.unstable():
231 troubles.append('unstable')
231 troubles.append('unstable')
232 if self.bumped():
232 if self.bumped():
233 troubles.append('bumped')
233 troubles.append('bumped')
234 if self.divergent():
234 if self.divergent():
235 troubles.append('divergent')
235 troubles.append('divergent')
236 return troubles
236 return troubles
237
237
238 def parents(self):
238 def parents(self):
239 """return contexts for each parent changeset"""
239 """return contexts for each parent changeset"""
240 return self._parents
240 return self._parents
241
241
242 def p1(self):
242 def p1(self):
243 return self._parents[0]
243 return self._parents[0]
244
244
245 def p2(self):
245 def p2(self):
246 parents = self._parents
246 parents = self._parents
247 if len(parents) == 2:
247 if len(parents) == 2:
248 return parents[1]
248 return parents[1]
249 return changectx(self._repo, nullrev)
249 return changectx(self._repo, nullrev)
250
250
251 def _fileinfo(self, path):
251 def _fileinfo(self, path):
252 if '_manifest' in self.__dict__:
252 if '_manifest' in self.__dict__:
253 try:
253 try:
254 return self._manifest[path], self._manifest.flags(path)
254 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
255 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
256 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
257 _('not found in manifest'))
258 if '_manifestdelta' in self.__dict__ or path in self.files():
258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
259 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
260 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
261 self._manifestdelta.flags(path))
262 node, flag = self._repo.manifest.find(self._changeset[0], path)
262 node, flag = self._repo.manifest.find(self._changeset[0], path)
263 if not node:
263 if not node:
264 raise error.ManifestLookupError(self._node, path,
264 raise error.ManifestLookupError(self._node, path,
265 _('not found in manifest'))
265 _('not found in manifest'))
266
266
267 return node, flag
267 return node, flag
268
268
269 def filenode(self, path):
269 def filenode(self, path):
270 return self._fileinfo(path)[0]
270 return self._fileinfo(path)[0]
271
271
272 def flags(self, path):
272 def flags(self, path):
273 try:
273 try:
274 return self._fileinfo(path)[1]
274 return self._fileinfo(path)[1]
275 except error.LookupError:
275 except error.LookupError:
276 return ''
276 return ''
277
277
278 def sub(self, path):
278 def sub(self, path):
279 '''return a subrepo for the stored revision of path, never wdir()'''
279 '''return a subrepo for the stored revision of path, never wdir()'''
280 return subrepo.subrepo(self, path)
280 return subrepo.subrepo(self, path)
281
281
282 def nullsub(self, path, pctx):
282 def nullsub(self, path, pctx):
283 return subrepo.nullsubrepo(self, path, pctx)
283 return subrepo.nullsubrepo(self, path, pctx)
284
284
285 def workingsub(self, path):
285 def workingsub(self, path):
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 context.
287 context.
288 '''
288 '''
289 return subrepo.subrepo(self, path, allowwdir=True)
289 return subrepo.subrepo(self, path, allowwdir=True)
290
290
291 def match(self, pats=[], include=None, exclude=None, default='glob',
291 def match(self, pats=[], include=None, exclude=None, default='glob',
292 listsubrepos=False, badfn=None):
292 listsubrepos=False, badfn=None):
293 r = self._repo
293 r = self._repo
294 return matchmod.match(r.root, r.getcwd(), pats,
294 return matchmod.match(r.root, r.getcwd(), pats,
295 include, exclude, default,
295 include, exclude, default,
296 auditor=r.nofsauditor, ctx=self,
296 auditor=r.nofsauditor, ctx=self,
297 listsubrepos=listsubrepos, badfn=badfn)
297 listsubrepos=listsubrepos, badfn=badfn)
298
298
299 def diff(self, ctx2=None, match=None, **opts):
299 def diff(self, ctx2=None, match=None, **opts):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
305 diffopts = patch.diffopts(self._repo.ui, opts)
305 diffopts = patch.diffopts(self._repo.ui, opts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307
307
308 def dirs(self):
308 def dirs(self):
309 return self._manifest.dirs()
309 return self._manifest.dirs()
310
310
311 def hasdir(self, dir):
311 def hasdir(self, dir):
312 return self._manifest.hasdir(dir)
312 return self._manifest.hasdir(dir)
313
313
314 def dirty(self, missing=False, merge=True, branch=True):
314 def dirty(self, missing=False, merge=True, branch=True):
315 return False
315 return False
316
316
317 def status(self, other=None, match=None, listignored=False,
317 def status(self, other=None, match=None, listignored=False,
318 listclean=False, listunknown=False, listsubrepos=False):
318 listclean=False, listunknown=False, listsubrepos=False):
319 """return status of files between two nodes or node and working
319 """return status of files between two nodes or node and working
320 directory.
320 directory.
321
321
322 If other is None, compare this node with working directory.
322 If other is None, compare this node with working directory.
323
323
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 """
325 """
326
326
327 ctx1 = self
327 ctx1 = self
328 ctx2 = self._repo[other]
328 ctx2 = self._repo[other]
329
329
330 # This next code block is, admittedly, fragile logic that tests for
330 # This next code block is, admittedly, fragile logic that tests for
331 # reversing the contexts and wouldn't need to exist if it weren't for
331 # reversing the contexts and wouldn't need to exist if it weren't for
332 # the fast (and common) code path of comparing the working directory
332 # the fast (and common) code path of comparing the working directory
333 # with its first parent.
333 # with its first parent.
334 #
334 #
335 # What we're aiming for here is the ability to call:
335 # What we're aiming for here is the ability to call:
336 #
336 #
337 # workingctx.status(parentctx)
337 # workingctx.status(parentctx)
338 #
338 #
339 # If we always built the manifest for each context and compared those,
339 # If we always built the manifest for each context and compared those,
340 # then we'd be done. But the special case of the above call means we
340 # then we'd be done. But the special case of the above call means we
341 # just copy the manifest of the parent.
341 # just copy the manifest of the parent.
342 reversed = False
342 reversed = False
343 if (not isinstance(ctx1, changectx)
343 if (not isinstance(ctx1, changectx)
344 and isinstance(ctx2, changectx)):
344 and isinstance(ctx2, changectx)):
345 reversed = True
345 reversed = True
346 ctx1, ctx2 = ctx2, ctx1
346 ctx1, ctx2 = ctx2, ctx1
347
347
348 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
351 listunknown)
352
352
353 if reversed:
353 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
355 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
357 r.clean)
358
358
359 if listsubrepos:
359 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
361 try:
362 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
363 except KeyError:
363 except KeyError:
364 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do ignore it.
366 # won't contain that subpath. The best we can do ignore it.
367 rev2 = None
367 rev2 = None
368 submatch = matchmod.narrowmatcher(subpath, match)
368 submatch = matchmod.narrowmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
371 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
374
375 for l in r:
375 for l in r:
376 l.sort()
376 l.sort()
377
377
378 return r
378 return r
379
379
380
380
381 def makememctx(repo, parents, text, user, date, branch, files, store,
381 def makememctx(repo, parents, text, user, date, branch, files, store,
382 editor=None, extra=None):
382 editor=None, extra=None):
383 def getfilectx(repo, memctx, path):
383 def getfilectx(repo, memctx, path):
384 data, mode, copied = store.getfile(path)
384 data, mode, copied = store.getfile(path)
385 if data is None:
385 if data is None:
386 return None
386 return None
387 islink, isexec = mode
387 islink, isexec = mode
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 copied=copied, memctx=memctx)
389 copied=copied, memctx=memctx)
390 if extra is None:
390 if extra is None:
391 extra = {}
391 extra = {}
392 if branch:
392 if branch:
393 extra['branch'] = encoding.fromlocal(branch)
393 extra['branch'] = encoding.fromlocal(branch)
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 date, extra, editor)
395 date, extra, editor)
396 return ctx
396 return ctx
397
397
398 class changectx(basectx):
398 class changectx(basectx):
399 """A changecontext object makes access to data related to a particular
399 """A changecontext object makes access to data related to a particular
400 changeset convenient. It represents a read-only context already present in
400 changeset convenient. It represents a read-only context already present in
401 the repo."""
401 the repo."""
402 def __init__(self, repo, changeid=''):
402 def __init__(self, repo, changeid=''):
403 """changeid is a revision number, node, or tag"""
403 """changeid is a revision number, node, or tag"""
404
404
405 # since basectx.__new__ already took care of copying the object, we
405 # since basectx.__new__ already took care of copying the object, we
406 # don't need to do anything in __init__, so we just exit here
406 # don't need to do anything in __init__, so we just exit here
407 if isinstance(changeid, basectx):
407 if isinstance(changeid, basectx):
408 return
408 return
409
409
410 if changeid == '':
410 if changeid == '':
411 changeid = '.'
411 changeid = '.'
412 self._repo = repo
412 self._repo = repo
413
413
414 try:
414 try:
415 if isinstance(changeid, int):
415 if isinstance(changeid, int):
416 self._node = repo.changelog.node(changeid)
416 self._node = repo.changelog.node(changeid)
417 self._rev = changeid
417 self._rev = changeid
418 return
418 return
419 if isinstance(changeid, long):
419 if isinstance(changeid, long):
420 changeid = str(changeid)
420 changeid = str(changeid)
421 if changeid == 'null':
421 if changeid == 'null':
422 self._node = nullid
422 self._node = nullid
423 self._rev = nullrev
423 self._rev = nullrev
424 return
424 return
425 if changeid == 'tip':
425 if changeid == 'tip':
426 self._node = repo.changelog.tip()
426 self._node = repo.changelog.tip()
427 self._rev = repo.changelog.rev(self._node)
427 self._rev = repo.changelog.rev(self._node)
428 return
428 return
429 if changeid == '.' or changeid == repo.dirstate.p1():
429 if changeid == '.' or changeid == repo.dirstate.p1():
430 # this is a hack to delay/avoid loading obsmarkers
430 # this is a hack to delay/avoid loading obsmarkers
431 # when we know that '.' won't be hidden
431 # when we know that '.' won't be hidden
432 self._node = repo.dirstate.p1()
432 self._node = repo.dirstate.p1()
433 self._rev = repo.unfiltered().changelog.rev(self._node)
433 self._rev = repo.unfiltered().changelog.rev(self._node)
434 return
434 return
435 if len(changeid) == 20:
435 if len(changeid) == 20:
436 try:
436 try:
437 self._node = changeid
437 self._node = changeid
438 self._rev = repo.changelog.rev(changeid)
438 self._rev = repo.changelog.rev(changeid)
439 return
439 return
440 except error.FilteredRepoLookupError:
440 except error.FilteredRepoLookupError:
441 raise
441 raise
442 except LookupError:
442 except LookupError:
443 pass
443 pass
444
444
445 try:
445 try:
446 r = int(changeid)
446 r = int(changeid)
447 if str(r) != changeid:
447 if str(r) != changeid:
448 raise ValueError
448 raise ValueError
449 l = len(repo.changelog)
449 l = len(repo.changelog)
450 if r < 0:
450 if r < 0:
451 r += l
451 r += l
452 if r < 0 or r >= l:
452 if r < 0 or r >= l:
453 raise ValueError
453 raise ValueError
454 self._rev = r
454 self._rev = r
455 self._node = repo.changelog.node(r)
455 self._node = repo.changelog.node(r)
456 return
456 return
457 except error.FilteredIndexError:
457 except error.FilteredIndexError:
458 raise
458 raise
459 except (ValueError, OverflowError, IndexError):
459 except (ValueError, OverflowError, IndexError):
460 pass
460 pass
461
461
462 if len(changeid) == 40:
462 if len(changeid) == 40:
463 try:
463 try:
464 self._node = bin(changeid)
464 self._node = bin(changeid)
465 self._rev = repo.changelog.rev(self._node)
465 self._rev = repo.changelog.rev(self._node)
466 return
466 return
467 except error.FilteredLookupError:
467 except error.FilteredLookupError:
468 raise
468 raise
469 except (TypeError, LookupError):
469 except (TypeError, LookupError):
470 pass
470 pass
471
471
472 # lookup bookmarks through the name interface
472 # lookup bookmarks through the name interface
473 try:
473 try:
474 self._node = repo.names.singlenode(repo, changeid)
474 self._node = repo.names.singlenode(repo, changeid)
475 self._rev = repo.changelog.rev(self._node)
475 self._rev = repo.changelog.rev(self._node)
476 return
476 return
477 except KeyError:
477 except KeyError:
478 pass
478 pass
479 except error.FilteredRepoLookupError:
479 except error.FilteredRepoLookupError:
480 raise
480 raise
481 except error.RepoLookupError:
481 except error.RepoLookupError:
482 pass
482 pass
483
483
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 if self._node is not None:
485 if self._node is not None:
486 self._rev = repo.changelog.rev(self._node)
486 self._rev = repo.changelog.rev(self._node)
487 return
487 return
488
488
489 # lookup failed
489 # lookup failed
490 # check if it might have come from damaged dirstate
490 # check if it might have come from damaged dirstate
491 #
491 #
492 # XXX we could avoid the unfiltered if we had a recognizable
492 # XXX we could avoid the unfiltered if we had a recognizable
493 # exception for filtered changeset access
493 # exception for filtered changeset access
494 if changeid in repo.unfiltered().dirstate.parents():
494 if changeid in repo.unfiltered().dirstate.parents():
495 msg = _("working directory has unknown parent '%s'!")
495 msg = _("working directory has unknown parent '%s'!")
496 raise error.Abort(msg % short(changeid))
496 raise error.Abort(msg % short(changeid))
497 try:
497 try:
498 if len(changeid) == 20 and nonascii(changeid):
498 if len(changeid) == 20 and nonascii(changeid):
499 changeid = hex(changeid)
499 changeid = hex(changeid)
500 except TypeError:
500 except TypeError:
501 pass
501 pass
502 except (error.FilteredIndexError, error.FilteredLookupError,
502 except (error.FilteredIndexError, error.FilteredLookupError,
503 error.FilteredRepoLookupError):
503 error.FilteredRepoLookupError):
504 if repo.filtername.startswith('visible'):
504 if repo.filtername.startswith('visible'):
505 msg = _("hidden revision '%s'") % changeid
505 msg = _("hidden revision '%s'") % changeid
506 hint = _('use --hidden to access hidden revisions')
506 hint = _('use --hidden to access hidden revisions')
507 raise error.FilteredRepoLookupError(msg, hint=hint)
507 raise error.FilteredRepoLookupError(msg, hint=hint)
508 msg = _("filtered revision '%s' (not in '%s' subset)")
508 msg = _("filtered revision '%s' (not in '%s' subset)")
509 msg %= (changeid, repo.filtername)
509 msg %= (changeid, repo.filtername)
510 raise error.FilteredRepoLookupError(msg)
510 raise error.FilteredRepoLookupError(msg)
511 except IndexError:
511 except IndexError:
512 pass
512 pass
513 raise error.RepoLookupError(
513 raise error.RepoLookupError(
514 _("unknown revision '%s'") % changeid)
514 _("unknown revision '%s'") % changeid)
515
515
516 def __hash__(self):
516 def __hash__(self):
517 try:
517 try:
518 return hash(self._rev)
518 return hash(self._rev)
519 except AttributeError:
519 except AttributeError:
520 return id(self)
520 return id(self)
521
521
522 def __nonzero__(self):
522 def __nonzero__(self):
523 return self._rev != nullrev
523 return self._rev != nullrev
524
524
525 @propertycache
525 @propertycache
526 def _changeset(self):
526 def _changeset(self):
527 return self._repo.changelog.read(self.rev())
527 return self._repo.changelog.read(self.rev())
528
528
529 @propertycache
529 @propertycache
530 def _manifest(self):
530 def _manifest(self):
531 return self._repo.manifest.read(self._changeset[0])
531 return self._repo.manifest.read(self._changeset[0])
532
532
533 @propertycache
533 @propertycache
534 def _manifestdelta(self):
534 def _manifestdelta(self):
535 return self._repo.manifest.readdelta(self._changeset[0])
535 return self._repo.manifest.readdelta(self._changeset[0])
536
536
537 @propertycache
537 @propertycache
538 def _parents(self):
538 def _parents(self):
539 repo = self._repo
539 repo = self._repo
540 p1, p2 = repo.changelog.parentrevs(self._rev)
540 p1, p2 = repo.changelog.parentrevs(self._rev)
541 if p2 == nullrev:
541 if p2 == nullrev:
542 return [changectx(repo, p1)]
542 return [changectx(repo, p1)]
543 return [changectx(repo, p1), changectx(repo, p2)]
543 return [changectx(repo, p1), changectx(repo, p2)]
544
544
545 def changeset(self):
545 def changeset(self):
546 return self._changeset
546 return self._changeset
547 def manifestnode(self):
547 def manifestnode(self):
548 return self._changeset[0]
548 return self._changeset[0]
549
549
550 def user(self):
550 def user(self):
551 return self._changeset[1]
551 return self._changeset[1]
552 def date(self):
552 def date(self):
553 return self._changeset[2]
553 return self._changeset[2]
554 def files(self):
554 def files(self):
555 return self._changeset[3]
555 return self._changeset[3]
556 def description(self):
556 def description(self):
557 return self._changeset[4]
557 return self._changeset[4]
558 def branch(self):
558 def branch(self):
559 return encoding.tolocal(self._changeset[5].get("branch"))
559 return encoding.tolocal(self._changeset[5].get("branch"))
560 def closesbranch(self):
560 def closesbranch(self):
561 return 'close' in self._changeset[5]
561 return 'close' in self._changeset[5]
562 def extra(self):
562 def extra(self):
563 return self._changeset[5]
563 return self._changeset[5]
564 def tags(self):
564 def tags(self):
565 return self._repo.nodetags(self._node)
565 return self._repo.nodetags(self._node)
566 def bookmarks(self):
566 def bookmarks(self):
567 return self._repo.nodebookmarks(self._node)
567 return self._repo.nodebookmarks(self._node)
568 def phase(self):
568 def phase(self):
569 return self._repo._phasecache.phase(self._repo, self._rev)
569 return self._repo._phasecache.phase(self._repo, self._rev)
570 def hidden(self):
570 def hidden(self):
571 return self._rev in repoview.filterrevs(self._repo, 'visible')
571 return self._rev in repoview.filterrevs(self._repo, 'visible')
572
572
573 def children(self):
573 def children(self):
574 """return contexts for each child changeset"""
574 """return contexts for each child changeset"""
575 c = self._repo.changelog.children(self._node)
575 c = self._repo.changelog.children(self._node)
576 return [changectx(self._repo, x) for x in c]
576 return [changectx(self._repo, x) for x in c]
577
577
578 def ancestors(self):
578 def ancestors(self):
579 for a in self._repo.changelog.ancestors([self._rev]):
579 for a in self._repo.changelog.ancestors([self._rev]):
580 yield changectx(self._repo, a)
580 yield changectx(self._repo, a)
581
581
582 def descendants(self):
582 def descendants(self):
583 for d in self._repo.changelog.descendants([self._rev]):
583 for d in self._repo.changelog.descendants([self._rev]):
584 yield changectx(self._repo, d)
584 yield changectx(self._repo, d)
585
585
586 def filectx(self, path, fileid=None, filelog=None):
586 def filectx(self, path, fileid=None, filelog=None):
587 """get a file context from this changeset"""
587 """get a file context from this changeset"""
588 if fileid is None:
588 if fileid is None:
589 fileid = self.filenode(path)
589 fileid = self.filenode(path)
590 return filectx(self._repo, path, fileid=fileid,
590 return filectx(self._repo, path, fileid=fileid,
591 changectx=self, filelog=filelog)
591 changectx=self, filelog=filelog)
592
592
593 def ancestor(self, c2, warn=False):
593 def ancestor(self, c2, warn=False):
594 """return the "best" ancestor context of self and c2
594 """return the "best" ancestor context of self and c2
595
595
596 If there are multiple candidates, it will show a message and check
596 If there are multiple candidates, it will show a message and check
597 merge.preferancestor configuration before falling back to the
597 merge.preferancestor configuration before falling back to the
598 revlog ancestor."""
598 revlog ancestor."""
599 # deal with workingctxs
599 # deal with workingctxs
600 n2 = c2._node
600 n2 = c2._node
601 if n2 is None:
601 if n2 is None:
602 n2 = c2._parents[0]._node
602 n2 = c2._parents[0]._node
603 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
603 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
604 if not cahs:
604 if not cahs:
605 anc = nullid
605 anc = nullid
606 elif len(cahs) == 1:
606 elif len(cahs) == 1:
607 anc = cahs[0]
607 anc = cahs[0]
608 else:
608 else:
609 # experimental config: merge.preferancestor
609 # experimental config: merge.preferancestor
610 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
610 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
611 try:
611 try:
612 ctx = changectx(self._repo, r)
612 ctx = changectx(self._repo, r)
613 except error.RepoLookupError:
613 except error.RepoLookupError:
614 continue
614 continue
615 anc = ctx.node()
615 anc = ctx.node()
616 if anc in cahs:
616 if anc in cahs:
617 break
617 break
618 else:
618 else:
619 anc = self._repo.changelog.ancestor(self._node, n2)
619 anc = self._repo.changelog.ancestor(self._node, n2)
620 if warn:
620 if warn:
621 self._repo.ui.status(
621 self._repo.ui.status(
622 (_("note: using %s as ancestor of %s and %s\n") %
622 (_("note: using %s as ancestor of %s and %s\n") %
623 (short(anc), short(self._node), short(n2))) +
623 (short(anc), short(self._node), short(n2))) +
624 ''.join(_(" alternatively, use --config "
624 ''.join(_(" alternatively, use --config "
625 "merge.preferancestor=%s\n") %
625 "merge.preferancestor=%s\n") %
626 short(n) for n in sorted(cahs) if n != anc))
626 short(n) for n in sorted(cahs) if n != anc))
627 return changectx(self._repo, anc)
627 return changectx(self._repo, anc)
628
628
629 def descendant(self, other):
629 def descendant(self, other):
630 """True if other is descendant of this changeset"""
630 """True if other is descendant of this changeset"""
631 return self._repo.changelog.descendant(self._rev, other._rev)
631 return self._repo.changelog.descendant(self._rev, other._rev)
632
632
633 def walk(self, match):
633 def walk(self, match):
634 '''Generates matching file names.'''
634 '''Generates matching file names.'''
635
635
636 # Wrap match.bad method to have message with nodeid
636 # Wrap match.bad method to have message with nodeid
637 def bad(fn, msg):
637 def bad(fn, msg):
638 # The manifest doesn't know about subrepos, so don't complain about
638 # The manifest doesn't know about subrepos, so don't complain about
639 # paths into valid subrepos.
639 # paths into valid subrepos.
640 if any(fn == s or fn.startswith(s + '/')
640 if any(fn == s or fn.startswith(s + '/')
641 for s in self.substate):
641 for s in self.substate):
642 return
642 return
643 match.bad(fn, _('no such file in rev %s') % self)
643 match.bad(fn, _('no such file in rev %s') % self)
644
644
645 m = matchmod.badmatch(match, bad)
645 m = matchmod.badmatch(match, bad)
646 return self._manifest.walk(m)
646 return self._manifest.walk(m)
647
647
648 def matches(self, match):
648 def matches(self, match):
649 return self.walk(match)
649 return self.walk(match)
650
650
651 class basefilectx(object):
651 class basefilectx(object):
652 """A filecontext object represents the common logic for its children:
652 """A filecontext object represents the common logic for its children:
653 filectx: read-only access to a filerevision that is already present
653 filectx: read-only access to a filerevision that is already present
654 in the repo,
654 in the repo,
655 workingfilectx: a filecontext that represents files from the working
655 workingfilectx: a filecontext that represents files from the working
656 directory,
656 directory,
657 memfilectx: a filecontext that represents files in-memory."""
657 memfilectx: a filecontext that represents files in-memory."""
658 def __new__(cls, repo, path, *args, **kwargs):
658 def __new__(cls, repo, path, *args, **kwargs):
659 return super(basefilectx, cls).__new__(cls)
659 return super(basefilectx, cls).__new__(cls)
660
660
661 @propertycache
661 @propertycache
662 def _filelog(self):
662 def _filelog(self):
663 return self._repo.file(self._path)
663 return self._repo.file(self._path)
664
664
665 @propertycache
665 @propertycache
666 def _changeid(self):
666 def _changeid(self):
667 if '_changeid' in self.__dict__:
667 if '_changeid' in self.__dict__:
668 return self._changeid
668 return self._changeid
669 elif '_changectx' in self.__dict__:
669 elif '_changectx' in self.__dict__:
670 return self._changectx.rev()
670 return self._changectx.rev()
671 elif '_descendantrev' in self.__dict__:
671 elif '_descendantrev' in self.__dict__:
672 # this file context was created from a revision with a known
672 # this file context was created from a revision with a known
673 # descendant, we can (lazily) correct for linkrev aliases
673 # descendant, we can (lazily) correct for linkrev aliases
674 return self._adjustlinkrev(self._path, self._filelog,
674 return self._adjustlinkrev(self._path, self._filelog,
675 self._filenode, self._descendantrev)
675 self._filenode, self._descendantrev)
676 else:
676 else:
677 return self._filelog.linkrev(self._filerev)
677 return self._filelog.linkrev(self._filerev)
678
678
679 @propertycache
679 @propertycache
680 def _filenode(self):
680 def _filenode(self):
681 if '_fileid' in self.__dict__:
681 if '_fileid' in self.__dict__:
682 return self._filelog.lookup(self._fileid)
682 return self._filelog.lookup(self._fileid)
683 else:
683 else:
684 return self._changectx.filenode(self._path)
684 return self._changectx.filenode(self._path)
685
685
686 @propertycache
686 @propertycache
687 def _filerev(self):
687 def _filerev(self):
688 return self._filelog.rev(self._filenode)
688 return self._filelog.rev(self._filenode)
689
689
690 @propertycache
690 @propertycache
691 def _repopath(self):
691 def _repopath(self):
692 return self._path
692 return self._path
693
693
694 def __nonzero__(self):
694 def __nonzero__(self):
695 try:
695 try:
696 self._filenode
696 self._filenode
697 return True
697 return True
698 except error.LookupError:
698 except error.LookupError:
699 # file is missing
699 # file is missing
700 return False
700 return False
701
701
702 def __str__(self):
702 def __str__(self):
703 return "%s@%s" % (self.path(), self._changectx)
703 return "%s@%s" % (self.path(), self._changectx)
704
704
705 def __repr__(self):
705 def __repr__(self):
706 return "<%s %s>" % (type(self).__name__, str(self))
706 return "<%s %s>" % (type(self).__name__, str(self))
707
707
708 def __hash__(self):
708 def __hash__(self):
709 try:
709 try:
710 return hash((self._path, self._filenode))
710 return hash((self._path, self._filenode))
711 except AttributeError:
711 except AttributeError:
712 return id(self)
712 return id(self)
713
713
714 def __eq__(self, other):
714 def __eq__(self, other):
715 try:
715 try:
716 return (type(self) == type(other) and self._path == other._path
716 return (type(self) == type(other) and self._path == other._path
717 and self._filenode == other._filenode)
717 and self._filenode == other._filenode)
718 except AttributeError:
718 except AttributeError:
719 return False
719 return False
720
720
721 def __ne__(self, other):
721 def __ne__(self, other):
722 return not (self == other)
722 return not (self == other)
723
723
724 def filerev(self):
724 def filerev(self):
725 return self._filerev
725 return self._filerev
726 def filenode(self):
726 def filenode(self):
727 return self._filenode
727 return self._filenode
728 def flags(self):
728 def flags(self):
729 return self._changectx.flags(self._path)
729 return self._changectx.flags(self._path)
730 def filelog(self):
730 def filelog(self):
731 return self._filelog
731 return self._filelog
732 def rev(self):
732 def rev(self):
733 return self._changeid
733 return self._changeid
734 def linkrev(self):
734 def linkrev(self):
735 return self._filelog.linkrev(self._filerev)
735 return self._filelog.linkrev(self._filerev)
736 def node(self):
736 def node(self):
737 return self._changectx.node()
737 return self._changectx.node()
738 def hex(self):
738 def hex(self):
739 return self._changectx.hex()
739 return self._changectx.hex()
740 def user(self):
740 def user(self):
741 return self._changectx.user()
741 return self._changectx.user()
742 def date(self):
742 def date(self):
743 return self._changectx.date()
743 return self._changectx.date()
744 def files(self):
744 def files(self):
745 return self._changectx.files()
745 return self._changectx.files()
746 def description(self):
746 def description(self):
747 return self._changectx.description()
747 return self._changectx.description()
748 def branch(self):
748 def branch(self):
749 return self._changectx.branch()
749 return self._changectx.branch()
750 def extra(self):
750 def extra(self):
751 return self._changectx.extra()
751 return self._changectx.extra()
752 def phase(self):
752 def phase(self):
753 return self._changectx.phase()
753 return self._changectx.phase()
754 def phasestr(self):
754 def phasestr(self):
755 return self._changectx.phasestr()
755 return self._changectx.phasestr()
756 def manifest(self):
756 def manifest(self):
757 return self._changectx.manifest()
757 return self._changectx.manifest()
758 def changectx(self):
758 def changectx(self):
759 return self._changectx
759 return self._changectx
760 def repo(self):
760 def repo(self):
761 return self._repo
761 return self._repo
762
762
763 def path(self):
763 def path(self):
764 return self._path
764 return self._path
765
765
766 def isbinary(self):
766 def isbinary(self):
767 try:
767 try:
768 return util.binary(self.data())
768 return util.binary(self.data())
769 except IOError:
769 except IOError:
770 return False
770 return False
771 def isexec(self):
771 def isexec(self):
772 return 'x' in self.flags()
772 return 'x' in self.flags()
773 def islink(self):
773 def islink(self):
774 return 'l' in self.flags()
774 return 'l' in self.flags()
775
775
776 def isabsent(self):
776 def isabsent(self):
777 """whether this filectx represents a file not in self._changectx
777 """whether this filectx represents a file not in self._changectx
778
778
779 This is mainly for merge code to detect change/delete conflicts. This is
779 This is mainly for merge code to detect change/delete conflicts. This is
780 expected to be True for all subclasses of basectx."""
780 expected to be True for all subclasses of basectx."""
781 return False
781 return False
782
782
783 _customcmp = False
783 _customcmp = False
784 def cmp(self, fctx):
784 def cmp(self, fctx):
785 """compare with other file context
785 """compare with other file context
786
786
787 returns True if different than fctx.
787 returns True if different than fctx.
788 """
788 """
789 if fctx._customcmp:
789 if fctx._customcmp:
790 return fctx.cmp(self)
790 return fctx.cmp(self)
791
791
792 if (fctx._filerev is None
792 if (fctx._filerev is None
793 and (self._repo._encodefilterpats
793 and (self._repo._encodefilterpats
794 # if file data starts with '\1\n', empty metadata block is
794 # if file data starts with '\1\n', empty metadata block is
795 # prepended, which adds 4 bytes to filelog.size().
795 # prepended, which adds 4 bytes to filelog.size().
796 or self.size() - 4 == fctx.size())
796 or self.size() - 4 == fctx.size())
797 or self.size() == fctx.size()):
797 or self.size() == fctx.size()):
798 return self._filelog.cmp(self._filenode, fctx.data())
798 return self._filelog.cmp(self._filenode, fctx.data())
799
799
800 return True
800 return True
801
801
802 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
802 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
803 """return the first ancestor of <srcrev> introducing <fnode>
803 """return the first ancestor of <srcrev> introducing <fnode>
804
804
805 If the linkrev of the file revision does not point to an ancestor of
805 If the linkrev of the file revision does not point to an ancestor of
806 srcrev, we'll walk down the ancestors until we find one introducing
806 srcrev, we'll walk down the ancestors until we find one introducing
807 this file revision.
807 this file revision.
808
808
809 :repo: a localrepository object (used to access changelog and manifest)
809 :repo: a localrepository object (used to access changelog and manifest)
810 :path: the file path
810 :path: the file path
811 :fnode: the nodeid of the file revision
811 :fnode: the nodeid of the file revision
812 :filelog: the filelog of this path
812 :filelog: the filelog of this path
813 :srcrev: the changeset revision we search ancestors from
813 :srcrev: the changeset revision we search ancestors from
814 :inclusive: if true, the src revision will also be checked
814 :inclusive: if true, the src revision will also be checked
815 """
815 """
816 repo = self._repo
816 repo = self._repo
817 cl = repo.unfiltered().changelog
817 cl = repo.unfiltered().changelog
818 ma = repo.manifest
818 ma = repo.manifest
819 # fetch the linkrev
819 # fetch the linkrev
820 fr = filelog.rev(fnode)
820 fr = filelog.rev(fnode)
821 lkr = filelog.linkrev(fr)
821 lkr = filelog.linkrev(fr)
822 # hack to reuse ancestor computation when searching for renames
822 # hack to reuse ancestor computation when searching for renames
823 memberanc = getattr(self, '_ancestrycontext', None)
823 memberanc = getattr(self, '_ancestrycontext', None)
824 iteranc = None
824 iteranc = None
825 if srcrev is None:
825 if srcrev is None:
826 # wctx case, used by workingfilectx during mergecopy
826 # wctx case, used by workingfilectx during mergecopy
827 revs = [p.rev() for p in self._repo[None].parents()]
827 revs = [p.rev() for p in self._repo[None].parents()]
828 inclusive = True # we skipped the real (revless) source
828 inclusive = True # we skipped the real (revless) source
829 else:
829 else:
830 revs = [srcrev]
830 revs = [srcrev]
831 if memberanc is None:
831 if memberanc is None:
832 memberanc = iteranc = cl.ancestors(revs, lkr,
832 memberanc = iteranc = cl.ancestors(revs, lkr,
833 inclusive=inclusive)
833 inclusive=inclusive)
834 # check if this linkrev is an ancestor of srcrev
834 # check if this linkrev is an ancestor of srcrev
835 if lkr not in memberanc:
835 if lkr not in memberanc:
836 if iteranc is None:
836 if iteranc is None:
837 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
837 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
838 for a in iteranc:
838 for a in iteranc:
839 ac = cl.read(a) # get changeset data (we avoid object creation)
839 ac = cl.read(a) # get changeset data (we avoid object creation)
840 if path in ac[3]: # checking the 'files' field.
840 if path in ac[3]: # checking the 'files' field.
841 # The file has been touched, check if the content is
841 # The file has been touched, check if the content is
842 # similar to the one we search for.
842 # similar to the one we search for.
843 if fnode == ma.readfast(ac[0]).get(path):
843 if fnode == ma.readfast(ac[0]).get(path):
844 return a
844 return a
845 # In theory, we should never get out of that loop without a result.
845 # In theory, we should never get out of that loop without a result.
846 # But if manifest uses a buggy file revision (not children of the
846 # But if manifest uses a buggy file revision (not children of the
847 # one it replaces) we could. Such a buggy situation will likely
847 # one it replaces) we could. Such a buggy situation will likely
848 # result is crash somewhere else at to some point.
848 # result is crash somewhere else at to some point.
849 return lkr
849 return lkr
850
850
851 def introrev(self):
851 def introrev(self):
852 """return the rev of the changeset which introduced this file revision
852 """return the rev of the changeset which introduced this file revision
853
853
854 This method is different from linkrev because it take into account the
854 This method is different from linkrev because it take into account the
855 changeset the filectx was created from. It ensures the returned
855 changeset the filectx was created from. It ensures the returned
856 revision is one of its ancestors. This prevents bugs from
856 revision is one of its ancestors. This prevents bugs from
857 'linkrev-shadowing' when a file revision is used by multiple
857 'linkrev-shadowing' when a file revision is used by multiple
858 changesets.
858 changesets.
859 """
859 """
860 lkr = self.linkrev()
860 lkr = self.linkrev()
861 attrs = vars(self)
861 attrs = vars(self)
862 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
862 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
863 if noctx or self.rev() == lkr:
863 if noctx or self.rev() == lkr:
864 return self.linkrev()
864 return self.linkrev()
865 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
865 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
866 self.rev(), inclusive=True)
866 self.rev(), inclusive=True)
867
867
868 def _parentfilectx(self, path, fileid, filelog):
868 def _parentfilectx(self, path, fileid, filelog):
869 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
869 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
870 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
870 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
871 if '_changeid' in vars(self) or '_changectx' in vars(self):
871 if '_changeid' in vars(self) or '_changectx' in vars(self):
872 # If self is associated with a changeset (probably explicitly
872 # If self is associated with a changeset (probably explicitly
873 # fed), ensure the created filectx is associated with a
873 # fed), ensure the created filectx is associated with a
874 # changeset that is an ancestor of self.changectx.
874 # changeset that is an ancestor of self.changectx.
875 # This lets us later use _adjustlinkrev to get a correct link.
875 # This lets us later use _adjustlinkrev to get a correct link.
876 fctx._descendantrev = self.rev()
876 fctx._descendantrev = self.rev()
877 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
877 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
878 elif '_descendantrev' in vars(self):
878 elif '_descendantrev' in vars(self):
879 # Otherwise propagate _descendantrev if we have one associated.
879 # Otherwise propagate _descendantrev if we have one associated.
880 fctx._descendantrev = self._descendantrev
880 fctx._descendantrev = self._descendantrev
881 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
881 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
882 return fctx
882 return fctx
883
883
884 def parents(self):
884 def parents(self):
885 _path = self._path
885 _path = self._path
886 fl = self._filelog
886 fl = self._filelog
887 parents = self._filelog.parents(self._filenode)
887 parents = self._filelog.parents(self._filenode)
888 pl = [(_path, node, fl) for node in parents if node != nullid]
888 pl = [(_path, node, fl) for node in parents if node != nullid]
889
889
890 r = fl.renamed(self._filenode)
890 r = fl.renamed(self._filenode)
891 if r:
891 if r:
892 # - In the simple rename case, both parent are nullid, pl is empty.
892 # - In the simple rename case, both parent are nullid, pl is empty.
893 # - In case of merge, only one of the parent is null id and should
893 # - In case of merge, only one of the parent is null id and should
894 # be replaced with the rename information. This parent is -always-
894 # be replaced with the rename information. This parent is -always-
895 # the first one.
895 # the first one.
896 #
896 #
897 # As null id have always been filtered out in the previous list
897 # As null id have always been filtered out in the previous list
898 # comprehension, inserting to 0 will always result in "replacing
898 # comprehension, inserting to 0 will always result in "replacing
899 # first nullid parent with rename information.
899 # first nullid parent with rename information.
900 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
900 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
901
901
902 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
902 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
903
903
904 def p1(self):
904 def p1(self):
905 return self.parents()[0]
905 return self.parents()[0]
906
906
907 def p2(self):
907 def p2(self):
908 p = self.parents()
908 p = self.parents()
909 if len(p) == 2:
909 if len(p) == 2:
910 return p[1]
910 return p[1]
911 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
911 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
912
912
913 def annotate(self, follow=False, linenumber=None, diffopts=None):
913 def annotate(self, follow=False, linenumber=None, diffopts=None):
914 '''returns a list of tuples of (ctx, line) for each line
914 '''returns a list of tuples of (ctx, line) for each line
915 in the file, where ctx is the filectx of the node where
915 in the file, where ctx is the filectx of the node where
916 that line was last changed.
916 that line was last changed.
917 This returns tuples of ((ctx, linenumber), line) for each line
917 This returns tuples of ((ctx, linenumber), line) for each line
918 if the "linenumber" parameter is not None.
918 if the "linenumber" parameter is not None.
919 In such tuples, linenumber is the line's number at its first
919 In such tuples, linenumber is the line's number at its first
920 appearance in the managed file.
920 appearance in the managed file.
921 To reduce annotation cost,
921 To reduce annotation cost,
922 this returns a fixed value (False) as the linenumber
922 this returns a fixed value (False) as the linenumber
923 if the "linenumber" parameter is False.'''
923 if the "linenumber" parameter is False.'''
924
924
925 if linenumber is None:
925 if linenumber is None:
926 def decorate(text, rev):
926 def decorate(text, rev):
927 return ([rev] * len(text.splitlines()), text)
927 return ([rev] * len(text.splitlines()), text)
928 elif linenumber:
928 elif linenumber:
929 def decorate(text, rev):
929 def decorate(text, rev):
930 size = len(text.splitlines())
930 size = len(text.splitlines())
931 return ([(rev, i) for i in xrange(1, size + 1)], text)
931 return ([(rev, i) for i in xrange(1, size + 1)], text)
932 else:
932 else:
933 def decorate(text, rev):
933 def decorate(text, rev):
934 return ([(rev, False)] * len(text.splitlines()), text)
934 return ([(rev, False)] * len(text.splitlines()), text)
935
935
936 def pair(parent, child):
936 def pair(parent, child):
937 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
937 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
938 refine=True)
938 refine=True)
939 for (a1, a2, b1, b2), t in blocks:
939 for (a1, a2, b1, b2), t in blocks:
940 # Changed blocks ('!') or blocks made only of blank lines ('~')
940 # Changed blocks ('!') or blocks made only of blank lines ('~')
941 # belong to the child.
941 # belong to the child.
942 if t == '=':
942 if t == '=':
943 child[0][b1:b2] = parent[0][a1:a2]
943 child[0][b1:b2] = parent[0][a1:a2]
944 return child
944 return child
945
945
946 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
946 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
947
947
948 def parents(f):
948 def parents(f):
949 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
949 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
950 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
950 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
951 # from the topmost introrev (= srcrev) down to p.linkrev() if it
951 # from the topmost introrev (= srcrev) down to p.linkrev() if it
952 # isn't an ancestor of the srcrev.
952 # isn't an ancestor of the srcrev.
953 f._changeid
953 f._changeid
954 pl = f.parents()
954 pl = f.parents()
955
955
956 # Don't return renamed parents if we aren't following.
956 # Don't return renamed parents if we aren't following.
957 if not follow:
957 if not follow:
958 pl = [p for p in pl if p.path() == f.path()]
958 pl = [p for p in pl if p.path() == f.path()]
959
959
960 # renamed filectx won't have a filelog yet, so set it
960 # renamed filectx won't have a filelog yet, so set it
961 # from the cache to save time
961 # from the cache to save time
962 for p in pl:
962 for p in pl:
963 if '_filelog' not in p.__dict__:
963 if '_filelog' not in p.__dict__:
964 p._filelog = getlog(p.path())
964 p._filelog = getlog(p.path())
965
965
966 return pl
966 return pl
967
967
968 # use linkrev to find the first changeset where self appeared
968 # use linkrev to find the first changeset where self appeared
969 base = self
969 base = self
970 introrev = self.introrev()
970 introrev = self.introrev()
971 if self.rev() != introrev:
971 if self.rev() != introrev:
972 base = self.filectx(self.filenode(), changeid=introrev)
972 base = self.filectx(self.filenode(), changeid=introrev)
973 if getattr(base, '_ancestrycontext', None) is None:
973 if getattr(base, '_ancestrycontext', None) is None:
974 cl = self._repo.changelog
974 cl = self._repo.changelog
975 if introrev is None:
975 if introrev is None:
976 # wctx is not inclusive, but works because _ancestrycontext
976 # wctx is not inclusive, but works because _ancestrycontext
977 # is used to test filelog revisions
977 # is used to test filelog revisions
978 ac = cl.ancestors([p.rev() for p in base.parents()],
978 ac = cl.ancestors([p.rev() for p in base.parents()],
979 inclusive=True)
979 inclusive=True)
980 else:
980 else:
981 ac = cl.ancestors([introrev], inclusive=True)
981 ac = cl.ancestors([introrev], inclusive=True)
982 base._ancestrycontext = ac
982 base._ancestrycontext = ac
983
983
984 # This algorithm would prefer to be recursive, but Python is a
984 # This algorithm would prefer to be recursive, but Python is a
985 # bit recursion-hostile. Instead we do an iterative
985 # bit recursion-hostile. Instead we do an iterative
986 # depth-first search.
986 # depth-first search.
987
987
988 visit = [base]
988 visit = [base]
989 hist = {}
989 hist = {}
990 pcache = {}
990 pcache = {}
991 needed = {base: 1}
991 needed = {base: 1}
992 while visit:
992 while visit:
993 f = visit[-1]
993 f = visit[-1]
994 pcached = f in pcache
994 pcached = f in pcache
995 if not pcached:
995 if not pcached:
996 pcache[f] = parents(f)
996 pcache[f] = parents(f)
997
997
998 ready = True
998 ready = True
999 pl = pcache[f]
999 pl = pcache[f]
1000 for p in pl:
1000 for p in pl:
1001 if p not in hist:
1001 if p not in hist:
1002 ready = False
1002 ready = False
1003 visit.append(p)
1003 visit.append(p)
1004 if not pcached:
1004 if not pcached:
1005 needed[p] = needed.get(p, 0) + 1
1005 needed[p] = needed.get(p, 0) + 1
1006 if ready:
1006 if ready:
1007 visit.pop()
1007 visit.pop()
1008 reusable = f in hist
1008 reusable = f in hist
1009 if reusable:
1009 if reusable:
1010 curr = hist[f]
1010 curr = hist[f]
1011 else:
1011 else:
1012 curr = decorate(f.data(), f)
1012 curr = decorate(f.data(), f)
1013 for p in pl:
1013 for p in pl:
1014 if not reusable:
1014 if not reusable:
1015 curr = pair(hist[p], curr)
1015 curr = pair(hist[p], curr)
1016 if needed[p] == 1:
1016 if needed[p] == 1:
1017 del hist[p]
1017 del hist[p]
1018 del needed[p]
1018 del needed[p]
1019 else:
1019 else:
1020 needed[p] -= 1
1020 needed[p] -= 1
1021
1021
1022 hist[f] = curr
1022 hist[f] = curr
1023 pcache[f] = []
1023 pcache[f] = []
1024
1024
1025 return zip(hist[base][0], hist[base][1].splitlines(True))
1025 return zip(hist[base][0], hist[base][1].splitlines(True))
1026
1026
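For orientation, a minimal sketch of how the annotate() result might be consumed; the repository path and file name below are hypothetical, and the calls rely only on the API visible in this module plus mercurial.hg and mercurial.ui for opening a repository:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')   # hypothetical repository
fctx = repo['tip']['some/file.py']                   # hypothetical tracked file
for introctx, line in fctx.annotate(follow=True):
    # introctx is the filectx that last changed this line
    print('%d: %s' % (introctx.rev(), line.rstrip('\n')))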
1027 def ancestors(self, followfirst=False):
1027 def ancestors(self, followfirst=False):
1028 visit = {}
1028 visit = {}
1029 c = self
1029 c = self
1030 if followfirst:
1030 if followfirst:
1031 cut = 1
1031 cut = 1
1032 else:
1032 else:
1033 cut = None
1033 cut = None
1034
1034
1035 while True:
1035 while True:
1036 for parent in c.parents()[:cut]:
1036 for parent in c.parents()[:cut]:
1037 visit[(parent.linkrev(), parent.filenode())] = parent
1037 visit[(parent.linkrev(), parent.filenode())] = parent
1038 if not visit:
1038 if not visit:
1039 break
1039 break
1040 c = visit.pop(max(visit))
1040 c = visit.pop(max(visit))
1041 yield c
1041 yield c
1042
1042
1043 class filectx(basefilectx):
1043 class filectx(basefilectx):
1044 """A filecontext object makes access to data related to a particular
1044 """A filecontext object makes access to data related to a particular
1045 filerevision convenient."""
1045 filerevision convenient."""
1046 def __init__(self, repo, path, changeid=None, fileid=None,
1046 def __init__(self, repo, path, changeid=None, fileid=None,
1047 filelog=None, changectx=None):
1047 filelog=None, changectx=None):
1048 """changeid can be a changeset revision, node, or tag.
1048 """changeid can be a changeset revision, node, or tag.
1049 fileid can be a file revision or node."""
1049 fileid can be a file revision or node."""
1050 self._repo = repo
1050 self._repo = repo
1051 self._path = path
1051 self._path = path
1052
1052
1053 assert (changeid is not None
1053 assert (changeid is not None
1054 or fileid is not None
1054 or fileid is not None
1055 or changectx is not None), \
1055 or changectx is not None), \
1056 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1056 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1057 % (changeid, fileid, changectx))
1057 % (changeid, fileid, changectx))
1058
1058
1059 if filelog is not None:
1059 if filelog is not None:
1060 self._filelog = filelog
1060 self._filelog = filelog
1061
1061
1062 if changeid is not None:
1062 if changeid is not None:
1063 self._changeid = changeid
1063 self._changeid = changeid
1064 if changectx is not None:
1064 if changectx is not None:
1065 self._changectx = changectx
1065 self._changectx = changectx
1066 if fileid is not None:
1066 if fileid is not None:
1067 self._fileid = fileid
1067 self._fileid = fileid
1068
1068
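A filectx is usually reached through a changectx, but as the constructor above shows it can also be built directly; a small hedged sketch (repo and the file path are hypothetical):

fctx1 = repo['tip']['some/file.py']                    # via a changectx
fctx2 = filectx(repo, 'some/file.py', changeid='tip')  # direct construction
assert fctx1.filenode() == fctx2.filenode()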
1069 @propertycache
1069 @propertycache
1070 def _changectx(self):
1070 def _changectx(self):
1071 try:
1071 try:
1072 return changectx(self._repo, self._changeid)
1072 return changectx(self._repo, self._changeid)
1073 except error.FilteredRepoLookupError:
1073 except error.FilteredRepoLookupError:
1074 # Linkrev may point to any revision in the repository. When the
1074 # Linkrev may point to any revision in the repository. When the
1075 # repository is filtered this may lead to `filectx` trying to build a
1075 # repository is filtered this may lead to `filectx` trying to build a
1076 # `changectx` for a filtered revision. In such a case we fall back to
1076 # `changectx` for a filtered revision. In such a case we fall back to
1077 # creating a `changectx` on the unfiltered version of the repository.
1077 # creating a `changectx` on the unfiltered version of the repository.
1078 # This fallback should not be an issue because `changectx` objects from
1078 # This fallback should not be an issue because `changectx` objects from
1079 # `filectx` are not used in complex operations that care about
1079 # `filectx` are not used in complex operations that care about
1080 # filtering.
1080 # filtering.
1081 #
1081 #
1082 # This fallback is a cheap and dirty fix that prevents several
1082 # This fallback is a cheap and dirty fix that prevents several
1083 # crashes. It does not ensure the behavior is correct. However the
1083 # crashes. It does not ensure the behavior is correct. However the
1084 # behavior was not correct before filtering either, and "incorrect
1084 # behavior was not correct before filtering either, and "incorrect
1085 # behavior" is seen as better than a crash.
1085 # behavior" is seen as better than a crash.
1086 #
1086 #
1087 # Linkrevs have several serious troubles with filtering that are
1087 # Linkrevs have several serious troubles with filtering that are
1088 # complicated to solve. Proper handling of the issue here should be
1088 # complicated to solve. Proper handling of the issue here should be
1089 # considered when a solution to the linkrev issues is on the table.
1089 # considered when a solution to the linkrev issues is on the table.
1090 return changectx(self._repo.unfiltered(), self._changeid)
1090 return changectx(self._repo.unfiltered(), self._changeid)
1091
1091
1092 def filectx(self, fileid, changeid=None):
1092 def filectx(self, fileid, changeid=None):
1093 '''opens an arbitrary revision of the file without
1093 '''opens an arbitrary revision of the file without
1094 opening a new filelog'''
1094 opening a new filelog'''
1095 return filectx(self._repo, self._path, fileid=fileid,
1095 return filectx(self._repo, self._path, fileid=fileid,
1096 filelog=self._filelog, changeid=changeid)
1096 filelog=self._filelog, changeid=changeid)
1097
1097
1098 def data(self):
1098 def data(self):
1099 try:
1099 try:
1100 return self._filelog.read(self._filenode)
1100 return self._filelog.read(self._filenode)
1101 except error.CensoredNodeError:
1101 except error.CensoredNodeError:
1102 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1102 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1103 return ""
1103 return ""
1104 raise error.Abort(_("censored node: %s") % short(self._filenode),
1104 raise error.Abort(_("censored node: %s") % short(self._filenode),
1105 hint=_("set censor.policy to ignore errors"))
1105 hint=_("set censor.policy to ignore errors"))
1106
1106
1107 def size(self):
1107 def size(self):
1108 return self._filelog.size(self._filerev)
1108 return self._filelog.size(self._filerev)
1109
1109
1110 def renamed(self):
1110 def renamed(self):
1111 """check if file was actually renamed in this changeset revision
1111 """check if file was actually renamed in this changeset revision
1112
1112
1113 If a rename is logged in the file revision, we report the copy for the
1113 If a rename is logged in the file revision, we report the copy for the
1114 changeset only if the file revision's linkrev points back to the changeset
1114 changeset only if the file revision's linkrev points back to the changeset
1115 in question or if both changeset parents contain different file revisions.
1115 in question or if both changeset parents contain different file revisions.
1116 """
1116 """
1117
1117
1118 renamed = self._filelog.renamed(self._filenode)
1118 renamed = self._filelog.renamed(self._filenode)
1119 if not renamed:
1119 if not renamed:
1120 return renamed
1120 return renamed
1121
1121
1122 if self.rev() == self.linkrev():
1122 if self.rev() == self.linkrev():
1123 return renamed
1123 return renamed
1124
1124
1125 name = self.path()
1125 name = self.path()
1126 fnode = self._filenode
1126 fnode = self._filenode
1127 for p in self._changectx.parents():
1127 for p in self._changectx.parents():
1128 try:
1128 try:
1129 if fnode == p.filenode(name):
1129 if fnode == p.filenode(name):
1130 return None
1130 return None
1131 except error.LookupError:
1131 except error.LookupError:
1132 pass
1132 pass
1133 return renamed
1133 return renamed
1134
1134
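To make the return value concrete, a hedged sketch of how renamed() is typically inspected (fctx is a hypothetical filectx obtained as in the earlier sketches):

copied = fctx.renamed()
if copied:
    srcpath, srcfilenode = copied
    # the copy/rename genuinely belongs to this changeset
else:
    # either no rename was recorded, or the recorded rename belongs to an
    # earlier changeset and was filtered out by the checks above
    pass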
1135 def children(self):
1135 def children(self):
1136 # hard for renames
1136 # hard for renames
1137 c = self._filelog.children(self._filenode)
1137 c = self._filelog.children(self._filenode)
1138 return [filectx(self._repo, self._path, fileid=x,
1138 return [filectx(self._repo, self._path, fileid=x,
1139 filelog=self._filelog) for x in c]
1139 filelog=self._filelog) for x in c]
1140
1140
1141 class committablectx(basectx):
1141 class committablectx(basectx):
1142 """A committablectx object provides common functionality for a context that
1142 """A committablectx object provides common functionality for a context that
1143 wants the ability to commit, e.g. workingctx or memctx."""
1143 wants the ability to commit, e.g. workingctx or memctx."""
1144 def __init__(self, repo, text="", user=None, date=None, extra=None,
1144 def __init__(self, repo, text="", user=None, date=None, extra=None,
1145 changes=None):
1145 changes=None):
1146 self._repo = repo
1146 self._repo = repo
1147 self._rev = None
1147 self._rev = None
1148 self._node = None
1148 self._node = None
1149 self._text = text
1149 self._text = text
1150 if date:
1150 if date:
1151 self._date = util.parsedate(date)
1151 self._date = util.parsedate(date)
1152 if user:
1152 if user:
1153 self._user = user
1153 self._user = user
1154 if changes:
1154 if changes:
1155 self._status = changes
1155 self._status = changes
1156
1156
1157 self._extra = {}
1157 self._extra = {}
1158 if extra:
1158 if extra:
1159 self._extra = extra.copy()
1159 self._extra = extra.copy()
1160 if 'branch' not in self._extra:
1160 if 'branch' not in self._extra:
1161 try:
1161 try:
1162 branch = encoding.fromlocal(self._repo.dirstate.branch())
1162 branch = encoding.fromlocal(self._repo.dirstate.branch())
1163 except UnicodeDecodeError:
1163 except UnicodeDecodeError:
1164 raise error.Abort(_('branch name not in UTF-8!'))
1164 raise error.Abort(_('branch name not in UTF-8!'))
1165 self._extra['branch'] = branch
1165 self._extra['branch'] = branch
1166 if self._extra['branch'] == '':
1166 if self._extra['branch'] == '':
1167 self._extra['branch'] = 'default'
1167 self._extra['branch'] = 'default'
1168
1168
1169 def __str__(self):
1169 def __str__(self):
1170 return str(self._parents[0]) + "+"
1170 return str(self._parents[0]) + "+"
1171
1171
1172 def __nonzero__(self):
1172 def __nonzero__(self):
1173 return True
1173 return True
1174
1174
1175 def _buildflagfunc(self):
1175 def _buildflagfunc(self):
1176 # Create a fallback function for getting file flags when the
1176 # Create a fallback function for getting file flags when the
1177 # filesystem doesn't support them
1177 # filesystem doesn't support them
1178
1178
1179 copiesget = self._repo.dirstate.copies().get
1179 copiesget = self._repo.dirstate.copies().get
1180 parents = self.parents()
1180 parents = self.parents()
1181 if len(parents) < 2:
1181 if len(parents) < 2:
1182 # when we have one parent, it's easy: copy from parent
1182 # when we have one parent, it's easy: copy from parent
1183 man = parents[0].manifest()
1183 man = parents[0].manifest()
1184 def func(f):
1184 def func(f):
1185 f = copiesget(f, f)
1185 f = copiesget(f, f)
1186 return man.flags(f)
1186 return man.flags(f)
1187 else:
1187 else:
1188 # merges are tricky: we try to reconstruct the unstored
1188 # merges are tricky: we try to reconstruct the unstored
1189 # result from the merge (issue1802)
1189 # result from the merge (issue1802)
1190 p1, p2 = parents
1190 p1, p2 = parents
1191 pa = p1.ancestor(p2)
1191 pa = p1.ancestor(p2)
1192 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1192 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1193
1193
1194 def func(f):
1194 def func(f):
1195 f = copiesget(f, f) # may be wrong for merges with copies
1195 f = copiesget(f, f) # may be wrong for merges with copies
1196 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1196 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1197 if fl1 == fl2:
1197 if fl1 == fl2:
1198 return fl1
1198 return fl1
1199 if fl1 == fla:
1199 if fl1 == fla:
1200 return fl2
1200 return fl2
1201 if fl2 == fla:
1201 if fl2 == fla:
1202 return fl1
1202 return fl1
1203 return '' # punt for conflicts
1203 return '' # punt for conflicts
1204
1204
1205 return func
1205 return func
1206
1206
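The merge branch above resolves a file's flags by a three-way comparison against the ancestor; a standalone restatement of that rule, with illustrative assertions:

def mergeflags(fl1, fl2, fla):
    # fl1/fl2: flags in the two parents, fla: flags in their ancestor
    if fl1 == fl2:
        return fl1   # both parents agree
    if fl1 == fla:
        return fl2   # only the second parent changed the flag
    if fl2 == fla:
        return fl1   # only the first parent changed the flag
    return ''        # conflicting flag changes: punt

assert mergeflags('x', 'x', '') == 'x'   # both sides made the file executable
assert mergeflags('x', '', '') == 'x'    # only the first parent changed it
assert mergeflags('l', 'x', '') == ''    # conflict, fall back to no flags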
1207 @propertycache
1207 @propertycache
1208 def _flagfunc(self):
1208 def _flagfunc(self):
1209 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1209 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1210
1210
1211 @propertycache
1211 @propertycache
1212 def _manifest(self):
1212 def _manifest(self):
1213 """generate a manifest corresponding to the values in self._status
1213 """generate a manifest corresponding to the values in self._status
1214
1214
1215 This reuses the file nodeid from the parent, but appends an extra letter
1215 This reuses the file nodeid from the parent, but appends an extra letter
1216 when the file is modified. Modified files get an extra 'm' while added
1216 when the file is modified. Modified files get an extra 'm' while added
1217 files get an extra 'a'. This is used by the manifest merge to see that
1217 files get an extra 'a'. This is used by the manifest merge to see that
1218 files differ and by update logic to avoid deleting newly added files.
1218 files differ and by update logic to avoid deleting newly added files.
1219 """
1219 """
1220 parents = self.parents()
1220 parents = self.parents()
1221
1221
1222 man1 = parents[0].manifest()
1222 man1 = parents[0].manifest()
1223 man = man1.copy()
1223 man = man1.copy()
1224 if len(parents) > 1:
1224 if len(parents) > 1:
1225 man2 = self.p2().manifest()
1225 man2 = self.p2().manifest()
1226 def getman(f):
1226 def getman(f):
1227 if f in man1:
1227 if f in man1:
1228 return man1
1228 return man1
1229 return man2
1229 return man2
1230 else:
1230 else:
1231 getman = lambda f: man1
1231 getman = lambda f: man1
1232
1232
1233 copied = self._repo.dirstate.copies()
1233 copied = self._repo.dirstate.copies()
1234 ff = self._flagfunc
1234 ff = self._flagfunc
1235 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1235 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1236 for f in l:
1236 for f in l:
1237 orig = copied.get(f, f)
1237 orig = copied.get(f, f)
1238 man[f] = getman(orig).get(orig, nullid) + i
1238 man[f] = getman(orig).get(orig, nullid) + i
1239 try:
1239 try:
1240 man.setflag(f, ff(f))
1240 man.setflag(f, ff(f))
1241 except OSError:
1241 except OSError:
1242 pass
1242 pass
1243
1243
1244 for f in self._status.deleted + self._status.removed:
1244 for f in self._status.deleted + self._status.removed:
1245 if f in man:
1245 if f in man:
1246 del man[f]
1246 del man[f]
1247
1247
1248 return man
1248 return man
1249
1249
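To make the marker scheme described in the docstring concrete, a short sketch; nullid is the null hash imported at the top of this module, and the parent node below is a made-up value:

from mercurial.node import nullid

parentnode = '\x12' * 20            # made-up 20-byte file node from the parent
addedentry = nullid + 'a'           # added file: null node plus marker 'a'
modifiedentry = parentnode + 'm'    # modified file: parent node plus marker 'm'
assert len(addedentry) == len(modifiedentry) == 21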
1250 @propertycache
1250 @propertycache
1251 def _status(self):
1251 def _status(self):
1252 return self._repo.status()
1252 return self._repo.status()
1253
1253
1254 @propertycache
1254 @propertycache
1255 def _user(self):
1255 def _user(self):
1256 return self._repo.ui.username()
1256 return self._repo.ui.username()
1257
1257
1258 @propertycache
1258 @propertycache
1259 def _date(self):
1259 def _date(self):
1260 return util.makedate()
1260 return util.makedate()
1261
1261
1262 def subrev(self, subpath):
1262 def subrev(self, subpath):
1263 return None
1263 return None
1264
1264
1265 def manifestnode(self):
1265 def manifestnode(self):
1266 return None
1266 return None
1267 def user(self):
1267 def user(self):
1268 return self._user or self._repo.ui.username()
1268 return self._user or self._repo.ui.username()
1269 def date(self):
1269 def date(self):
1270 return self._date
1270 return self._date
1271 def description(self):
1271 def description(self):
1272 return self._text
1272 return self._text
1273 def files(self):
1273 def files(self):
1274 return sorted(self._status.modified + self._status.added +
1274 return sorted(self._status.modified + self._status.added +
1275 self._status.removed)
1275 self._status.removed)
1276
1276
1277 def modified(self):
1277 def modified(self):
1278 return self._status.modified
1278 return self._status.modified
1279 def added(self):
1279 def added(self):
1280 return self._status.added
1280 return self._status.added
1281 def removed(self):
1281 def removed(self):
1282 return self._status.removed
1282 return self._status.removed
1283 def deleted(self):
1283 def deleted(self):
1284 return self._status.deleted
1284 return self._status.deleted
1285 def branch(self):
1285 def branch(self):
1286 return encoding.tolocal(self._extra['branch'])
1286 return encoding.tolocal(self._extra['branch'])
1287 def closesbranch(self):
1287 def closesbranch(self):
1288 return 'close' in self._extra
1288 return 'close' in self._extra
1289 def extra(self):
1289 def extra(self):
1290 return self._extra
1290 return self._extra
1291
1291
1292 def tags(self):
1292 def tags(self):
1293 return []
1293 return []
1294
1294
1295 def bookmarks(self):
1295 def bookmarks(self):
1296 b = []
1296 b = []
1297 for p in self.parents():
1297 for p in self.parents():
1298 b.extend(p.bookmarks())
1298 b.extend(p.bookmarks())
1299 return b
1299 return b
1300
1300
1301 def phase(self):
1301 def phase(self):
1302 phase = phases.draft # default phase to draft
1302 phase = phases.draft # default phase to draft
1303 for p in self.parents():
1303 for p in self.parents():
1304 phase = max(phase, p.phase())
1304 phase = max(phase, p.phase())
1305 return phase
1305 return phase
1306
1306
1307 def hidden(self):
1307 def hidden(self):
1308 return False
1308 return False
1309
1309
1310 def children(self):
1310 def children(self):
1311 return []
1311 return []
1312
1312
1313 def flags(self, path):
1313 def flags(self, path):
1314 if '_manifest' in self.__dict__:
1314 if '_manifest' in self.__dict__:
1315 try:
1315 try:
1316 return self._manifest.flags(path)
1316 return self._manifest.flags(path)
1317 except KeyError:
1317 except KeyError:
1318 return ''
1318 return ''
1319
1319
1320 try:
1320 try:
1321 return self._flagfunc(path)
1321 return self._flagfunc(path)
1322 except OSError:
1322 except OSError:
1323 return ''
1323 return ''
1324
1324
1325 def ancestor(self, c2):
1325 def ancestor(self, c2):
1326 """return the "best" ancestor context of self and c2"""
1326 """return the "best" ancestor context of self and c2"""
1327 return self._parents[0].ancestor(c2) # punt on two parents for now
1327 return self._parents[0].ancestor(c2) # punt on two parents for now
1328
1328
1329 def walk(self, match):
1329 def walk(self, match):
1330 '''Generates matching file names.'''
1330 '''Generates matching file names.'''
1331 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1331 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1332 True, False))
1332 True, False))
1333
1333
1334 def matches(self, match):
1334 def matches(self, match):
1335 return sorted(self._repo.dirstate.matches(match))
1335 return sorted(self._repo.dirstate.matches(match))
1336
1336
1337 def ancestors(self):
1337 def ancestors(self):
1338 for p in self._parents:
1338 for p in self._parents:
1339 yield p
1339 yield p
1340 for a in self._repo.changelog.ancestors(
1340 for a in self._repo.changelog.ancestors(
1341 [p.rev() for p in self._parents]):
1341 [p.rev() for p in self._parents]):
1342 yield changectx(self._repo, a)
1342 yield changectx(self._repo, a)
1343
1343
1344 def markcommitted(self, node):
1344 def markcommitted(self, node):
1345 """Perform post-commit cleanup necessary after committing this ctx
1345 """Perform post-commit cleanup necessary after committing this ctx
1346
1346
1347 Specifically, this updates backing stores this working context
1347 Specifically, this updates backing stores this working context
1348 wraps to reflect the fact that the changes reflected by this
1348 wraps to reflect the fact that the changes reflected by this
1349 workingctx have been committed. For example, it marks
1349 workingctx have been committed. For example, it marks
1350 modified and added files as normal in the dirstate.
1350 modified and added files as normal in the dirstate.
1351
1351
1352 """
1352 """
1353
1353
1354 self._repo.dirstate.beginparentchange()
1354 self._repo.dirstate.beginparentchange()
1355 for f in self.modified() + self.added():
1355 for f in self.modified() + self.added():
1356 self._repo.dirstate.normal(f)
1356 self._repo.dirstate.normal(f)
1357 for f in self.removed():
1357 for f in self.removed():
1358 self._repo.dirstate.drop(f)
1358 self._repo.dirstate.drop(f)
1359 self._repo.dirstate.setparents(node)
1359 self._repo.dirstate.setparents(node)
1360 self._repo.dirstate.endparentchange()
1360 self._repo.dirstate.endparentchange()
1361
1361
1362 # write changes out explicitly, because nesting wlock at
1362 # write changes out explicitly, because nesting wlock at
1363 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1363 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1364 # from immediately doing so for subsequent changing files
1364 # from immediately doing so for subsequent changing files
1365 self._repo.dirstate.write(self._repo.currenttransaction())
1365 self._repo.dirstate.write(self._repo.currenttransaction())
1366
1366
1367 class workingctx(committablectx):
1367 class workingctx(committablectx):
1368 """A workingctx object makes access to data related to
1368 """A workingctx object makes access to data related to
1369 the current working directory convenient.
1369 the current working directory convenient.
1370 date - any valid date string or (unixtime, offset), or None.
1370 date - any valid date string or (unixtime, offset), or None.
1371 user - username string, or None.
1371 user - username string, or None.
1372 extra - a dictionary of extra values, or None.
1372 extra - a dictionary of extra values, or None.
1373 changes - a list of file lists as returned by localrepo.status()
1373 changes - a list of file lists as returned by localrepo.status()
1374 or None to use the repository status.
1374 or None to use the repository status.
1375 """
1375 """
1376 def __init__(self, repo, text="", user=None, date=None, extra=None,
1376 def __init__(self, repo, text="", user=None, date=None, extra=None,
1377 changes=None):
1377 changes=None):
1378 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1378 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1379
1379
1380 def __iter__(self):
1380 def __iter__(self):
1381 d = self._repo.dirstate
1381 d = self._repo.dirstate
1382 for f in d:
1382 for f in d:
1383 if d[f] != 'r':
1383 if d[f] != 'r':
1384 yield f
1384 yield f
1385
1385
1386 def __contains__(self, key):
1386 def __contains__(self, key):
1387 return self._repo.dirstate[key] not in "?r"
1387 return self._repo.dirstate[key] not in "?r"
1388
1388
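For reference, the single-character dirstate states consulted by __iter__ and __contains__ above; membership in the working context therefore means the file is tracked in any state other than removed:

DIRSTATE_STATES = {
    'n': 'normal (tracked; clean, modified, or needing a lookup)',
    'a': 'added',
    'r': 'removed',
    'm': 'merged',
    '?': 'untracked / unknown',
}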
1389 def hex(self):
1389 def hex(self):
1390 return hex(wdirid)
1390 return hex(wdirid)
1391
1391
1392 @propertycache
1392 @propertycache
1393 def _parents(self):
1393 def _parents(self):
1394 p = self._repo.dirstate.parents()
1394 p = self._repo.dirstate.parents()
1395 if p[1] == nullid:
1395 if p[1] == nullid:
1396 p = p[:-1]
1396 p = p[:-1]
1397 return [changectx(self._repo, x) for x in p]
1397 return [changectx(self._repo, x) for x in p]
1398
1398
1399 def filectx(self, path, filelog=None):
1399 def filectx(self, path, filelog=None):
1400 """get a file context from the working directory"""
1400 """get a file context from the working directory"""
1401 return workingfilectx(self._repo, path, workingctx=self,
1401 return workingfilectx(self._repo, path, workingctx=self,
1402 filelog=filelog)
1402 filelog=filelog)
1403
1403
1404 def dirty(self, missing=False, merge=True, branch=True):
1404 def dirty(self, missing=False, merge=True, branch=True):
1405 "check whether a working directory is modified"
1405 "check whether a working directory is modified"
1406 # check subrepos first
1406 # check subrepos first
1407 for s in sorted(self.substate):
1407 for s in sorted(self.substate):
1408 if self.sub(s).dirty():
1408 if self.sub(s).dirty():
1409 return True
1409 return True
1410 # check current working dir
1410 # check current working dir
1411 return ((merge and self.p2()) or
1411 return ((merge and self.p2()) or
1412 (branch and self.branch() != self.p1().branch()) or
1412 (branch and self.branch() != self.p1().branch()) or
1413 self.modified() or self.added() or self.removed() or
1413 self.modified() or self.added() or self.removed() or
1414 (missing and self.deleted()))
1414 (missing and self.deleted()))
1415
1415
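A caller-side sketch of dirty(); repo is hypothetical, and repo[None] is the conventional way to obtain a workingctx:

wctx = repo[None]                 # the working directory context
if wctx.dirty(missing=True):
    raise error.Abort(_('uncommitted changes'))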
1416 def add(self, list, prefix=""):
1416 def add(self, list, prefix=""):
1417 join = lambda f: os.path.join(prefix, f)
1417 join = lambda f: os.path.join(prefix, f)
1418 with self._repo.wlock():
1418 with self._repo.wlock():
1419 ui, ds = self._repo.ui, self._repo.dirstate
1419 ui, ds = self._repo.ui, self._repo.dirstate
1420 rejected = []
1420 rejected = []
1421 lstat = self._repo.wvfs.lstat
1421 lstat = self._repo.wvfs.lstat
1422 for f in list:
1422 for f in list:
1423 scmutil.checkportable(ui, join(f))
1423 scmutil.checkportable(ui, join(f))
1424 try:
1424 try:
1425 st = lstat(f)
1425 st = lstat(f)
1426 except OSError:
1426 except OSError:
1427 ui.warn(_("%s does not exist!\n") % join(f))
1427 ui.warn(_("%s does not exist!\n") % join(f))
1428 rejected.append(f)
1428 rejected.append(f)
1429 continue
1429 continue
1430 if st.st_size > 10000000:
1430 if st.st_size > 10000000:
1431 ui.warn(_("%s: up to %d MB of RAM may be required "
1431 ui.warn(_("%s: up to %d MB of RAM may be required "
1432 "to manage this file\n"
1432 "to manage this file\n"
1433 "(use 'hg revert %s' to cancel the "
1433 "(use 'hg revert %s' to cancel the "
1434 "pending addition)\n")
1434 "pending addition)\n")
1435 % (f, 3 * st.st_size // 1000000, join(f)))
1435 % (f, 3 * st.st_size // 1000000, join(f)))
1436 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1436 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1437 ui.warn(_("%s not added: only files and symlinks "
1437 ui.warn(_("%s not added: only files and symlinks "
1438 "supported currently\n") % join(f))
1438 "supported currently\n") % join(f))
1439 rejected.append(f)
1439 rejected.append(f)
1440 elif ds[f] in 'amn':
1440 elif ds[f] in 'amn':
1441 ui.warn(_("%s already tracked!\n") % join(f))
1441 ui.warn(_("%s already tracked!\n") % join(f))
1442 elif ds[f] == 'r':
1442 elif ds[f] == 'r':
1443 ds.normallookup(f)
1443 ds.normallookup(f)
1444 else:
1444 else:
1445 ds.add(f)
1445 ds.add(f)
1446 return rejected
1446 return rejected
1447
1447
1448 def forget(self, files, prefix=""):
1448 def forget(self, files, prefix=""):
1449 join = lambda f: os.path.join(prefix, f)
1449 join = lambda f: os.path.join(prefix, f)
1450 wlock = self._repo.wlock()
1450 with self._repo.wlock():
1451 try:
1452 rejected = []
1451 rejected = []
1453 for f in files:
1452 for f in files:
1454 if f not in self._repo.dirstate:
1453 if f not in self._repo.dirstate:
1455 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1454 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1456 rejected.append(f)
1455 rejected.append(f)
1457 elif self._repo.dirstate[f] != 'a':
1456 elif self._repo.dirstate[f] != 'a':
1458 self._repo.dirstate.remove(f)
1457 self._repo.dirstate.remove(f)
1459 else:
1458 else:
1460 self._repo.dirstate.drop(f)
1459 self._repo.dirstate.drop(f)
1461 return rejected
1460 return rejected
1462 finally:
1463 wlock.release()
1464
1461
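The change above, the subject of this changeset, swaps the explicit try/finally around wlock for a context manager; the two shapes are equivalent in effect, roughly:

def with_explicit_release(repo, work):
    wlock = repo.wlock()
    try:
        work()
    finally:
        wlock.release()

def with_context_manager(repo, work):
    # relies on the lock object implementing __enter__/__exit__
    with repo.wlock():
        work()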
1465 def undelete(self, list):
1462 def undelete(self, list):
1466 pctxs = self.parents()
1463 pctxs = self.parents()
1467 wlock = self._repo.wlock()
1464 wlock = self._repo.wlock()
1468 try:
1465 try:
1469 for f in list:
1466 for f in list:
1470 if self._repo.dirstate[f] != 'r':
1467 if self._repo.dirstate[f] != 'r':
1471 self._repo.ui.warn(_("%s not removed!\n") % f)
1468 self._repo.ui.warn(_("%s not removed!\n") % f)
1472 else:
1469 else:
1473 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1470 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1474 t = fctx.data()
1471 t = fctx.data()
1475 self._repo.wwrite(f, t, fctx.flags())
1472 self._repo.wwrite(f, t, fctx.flags())
1476 self._repo.dirstate.normal(f)
1473 self._repo.dirstate.normal(f)
1477 finally:
1474 finally:
1478 wlock.release()
1475 wlock.release()
1479
1476
1480 def copy(self, source, dest):
1477 def copy(self, source, dest):
1481 try:
1478 try:
1482 st = self._repo.wvfs.lstat(dest)
1479 st = self._repo.wvfs.lstat(dest)
1483 except OSError as err:
1480 except OSError as err:
1484 if err.errno != errno.ENOENT:
1481 if err.errno != errno.ENOENT:
1485 raise
1482 raise
1486 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1483 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1487 return
1484 return
1488 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1485 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1489 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1486 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1490 "symbolic link\n") % dest)
1487 "symbolic link\n") % dest)
1491 else:
1488 else:
1492 wlock = self._repo.wlock()
1489 wlock = self._repo.wlock()
1493 try:
1490 try:
1494 if self._repo.dirstate[dest] in '?':
1491 if self._repo.dirstate[dest] in '?':
1495 self._repo.dirstate.add(dest)
1492 self._repo.dirstate.add(dest)
1496 elif self._repo.dirstate[dest] in 'r':
1493 elif self._repo.dirstate[dest] in 'r':
1497 self._repo.dirstate.normallookup(dest)
1494 self._repo.dirstate.normallookup(dest)
1498 self._repo.dirstate.copy(source, dest)
1495 self._repo.dirstate.copy(source, dest)
1499 finally:
1496 finally:
1500 wlock.release()
1497 wlock.release()
1501
1498
1502 def match(self, pats=[], include=None, exclude=None, default='glob',
1499 def match(self, pats=[], include=None, exclude=None, default='glob',
1503 listsubrepos=False, badfn=None):
1500 listsubrepos=False, badfn=None):
1504 r = self._repo
1501 r = self._repo
1505
1502
1506 # Only a case insensitive filesystem needs magic to translate user input
1503 # Only a case insensitive filesystem needs magic to translate user input
1507 # to actual case in the filesystem.
1504 # to actual case in the filesystem.
1508 if not util.checkcase(r.root):
1505 if not util.checkcase(r.root):
1509 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1506 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1510 exclude, default, r.auditor, self,
1507 exclude, default, r.auditor, self,
1511 listsubrepos=listsubrepos,
1508 listsubrepos=listsubrepos,
1512 badfn=badfn)
1509 badfn=badfn)
1513 return matchmod.match(r.root, r.getcwd(), pats,
1510 return matchmod.match(r.root, r.getcwd(), pats,
1514 include, exclude, default,
1511 include, exclude, default,
1515 auditor=r.auditor, ctx=self,
1512 auditor=r.auditor, ctx=self,
1516 listsubrepos=listsubrepos, badfn=badfn)
1513 listsubrepos=listsubrepos, badfn=badfn)
1517
1514
1518 def _filtersuspectsymlink(self, files):
1515 def _filtersuspectsymlink(self, files):
1519 if not files or self._repo.dirstate._checklink:
1516 if not files or self._repo.dirstate._checklink:
1520 return files
1517 return files
1521
1518
1522 # Symlink placeholders may get non-symlink-like contents
1519 # Symlink placeholders may get non-symlink-like contents
1523 # via user error or dereferencing by NFS or Samba servers,
1520 # via user error or dereferencing by NFS or Samba servers,
1524 # so we filter out any placeholders that don't look like a
1521 # so we filter out any placeholders that don't look like a
1525 # symlink
1522 # symlink
1526 sane = []
1523 sane = []
1527 for f in files:
1524 for f in files:
1528 if self.flags(f) == 'l':
1525 if self.flags(f) == 'l':
1529 d = self[f].data()
1526 d = self[f].data()
1530 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1527 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1531 self._repo.ui.debug('ignoring suspect symlink placeholder'
1528 self._repo.ui.debug('ignoring suspect symlink placeholder'
1532 ' "%s"\n' % f)
1529 ' "%s"\n' % f)
1533 continue
1530 continue
1534 sane.append(f)
1531 sane.append(f)
1535 return sane
1532 return sane
1536
1533
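A standalone restatement of the placeholder heuristic applied above; util.binary is the helper imported in this module, and the function name here is made up:

def plausiblesymlinkplaceholder(data):
    # a real symlink target is non-empty, short, single-line, and not binary
    if data == '' or len(data) >= 1024 or '\n' in data or util.binary(data):
        return False
    return True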
1537 def _checklookup(self, files):
1534 def _checklookup(self, files):
1538 # check for any possibly clean files
1535 # check for any possibly clean files
1539 if not files:
1536 if not files:
1540 return [], []
1537 return [], []
1541
1538
1542 modified = []
1539 modified = []
1543 fixup = []
1540 fixup = []
1544 pctx = self._parents[0]
1541 pctx = self._parents[0]
1545 # do a full compare of any files that might have changed
1542 # do a full compare of any files that might have changed
1546 for f in sorted(files):
1543 for f in sorted(files):
1547 if (f not in pctx or self.flags(f) != pctx.flags(f)
1544 if (f not in pctx or self.flags(f) != pctx.flags(f)
1548 or pctx[f].cmp(self[f])):
1545 or pctx[f].cmp(self[f])):
1549 modified.append(f)
1546 modified.append(f)
1550 else:
1547 else:
1551 fixup.append(f)
1548 fixup.append(f)
1552
1549
1553 # update dirstate for files that are actually clean
1550 # update dirstate for files that are actually clean
1554 if fixup:
1551 if fixup:
1555 try:
1552 try:
1556 # updating the dirstate is optional
1553 # updating the dirstate is optional
1557 # so we don't wait on the lock
1554 # so we don't wait on the lock
1558 # wlock can invalidate the dirstate, so cache normal _after_
1555 # wlock can invalidate the dirstate, so cache normal _after_
1559 # taking the lock
1556 # taking the lock
1560 wlock = self._repo.wlock(False)
1557 wlock = self._repo.wlock(False)
1561 normal = self._repo.dirstate.normal
1558 normal = self._repo.dirstate.normal
1562 try:
1559 try:
1563 for f in fixup:
1560 for f in fixup:
1564 normal(f)
1561 normal(f)
1565 # write changes out explicitly, because nesting
1562 # write changes out explicitly, because nesting
1566 # wlock at runtime may prevent 'wlock.release()'
1563 # wlock at runtime may prevent 'wlock.release()'
1567 # below from doing so for subsequent changing files
1564 # below from doing so for subsequent changing files
1568 self._repo.dirstate.write(self._repo.currenttransaction())
1565 self._repo.dirstate.write(self._repo.currenttransaction())
1569 finally:
1566 finally:
1570 wlock.release()
1567 wlock.release()
1571 except error.LockError:
1568 except error.LockError:
1572 pass
1569 pass
1573 return modified, fixup
1570 return modified, fixup
1574
1571
1575 def _manifestmatches(self, match, s):
1572 def _manifestmatches(self, match, s):
1576 """Slow path for workingctx
1573 """Slow path for workingctx
1577
1574
1578 The fast path is used when comparing the working directory to its
1575 The fast path is used when comparing the working directory to its
1579 parent; this function handles comparison with a non-parent, so we
1576 parent; this function handles comparison with a non-parent, so we
1580 need to build a manifest and return what matches.
1577 need to build a manifest and return what matches.
1581 """
1578 """
1582 mf = self._repo['.']._manifestmatches(match, s)
1579 mf = self._repo['.']._manifestmatches(match, s)
1583 for f in s.modified + s.added:
1580 for f in s.modified + s.added:
1584 mf[f] = _newnode
1581 mf[f] = _newnode
1585 mf.setflag(f, self.flags(f))
1582 mf.setflag(f, self.flags(f))
1586 for f in s.removed:
1583 for f in s.removed:
1587 if f in mf:
1584 if f in mf:
1588 del mf[f]
1585 del mf[f]
1589 return mf
1586 return mf
1590
1587
1591 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1588 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1592 unknown=False):
1589 unknown=False):
1593 '''Gets the status from the dirstate -- internal use only.'''
1590 '''Gets the status from the dirstate -- internal use only.'''
1594 listignored, listclean, listunknown = ignored, clean, unknown
1591 listignored, listclean, listunknown = ignored, clean, unknown
1595 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1592 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1596 subrepos = []
1593 subrepos = []
1597 if '.hgsub' in self:
1594 if '.hgsub' in self:
1598 subrepos = sorted(self.substate)
1595 subrepos = sorted(self.substate)
1599 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1596 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1600 listclean, listunknown)
1597 listclean, listunknown)
1601
1598
1602 # check for any possibly clean files
1599 # check for any possibly clean files
1603 if cmp:
1600 if cmp:
1604 modified2, fixup = self._checklookup(cmp)
1601 modified2, fixup = self._checklookup(cmp)
1605 s.modified.extend(modified2)
1602 s.modified.extend(modified2)
1606
1603
1607 # update dirstate for files that are actually clean
1604 # update dirstate for files that are actually clean
1608 if fixup and listclean:
1605 if fixup and listclean:
1609 s.clean.extend(fixup)
1606 s.clean.extend(fixup)
1610
1607
1611 if match.always():
1608 if match.always():
1612 # cache for performance
1609 # cache for performance
1613 if s.unknown or s.ignored or s.clean:
1610 if s.unknown or s.ignored or s.clean:
1614 # "_status" is cached with list*=False in the normal route
1611 # "_status" is cached with list*=False in the normal route
1615 self._status = scmutil.status(s.modified, s.added, s.removed,
1612 self._status = scmutil.status(s.modified, s.added, s.removed,
1616 s.deleted, [], [], [])
1613 s.deleted, [], [], [])
1617 else:
1614 else:
1618 self._status = s
1615 self._status = s
1619
1616
1620 return s
1617 return s
1621
1618
1622 def _buildstatus(self, other, s, match, listignored, listclean,
1619 def _buildstatus(self, other, s, match, listignored, listclean,
1623 listunknown):
1620 listunknown):
1624 """build a status with respect to another context
1621 """build a status with respect to another context
1625
1622
1626 This includes the logic for maintaining the fast path of status: when
1623 This includes the logic for maintaining the fast path of status: when
1627 comparing the working directory against its parent we can skip
1624 comparing the working directory against its parent we can skip
1628 building a new manifest; one is built only if self (the working
1625 building a new manifest; one is built only if self (the working
1629 directory) is compared against something other than its parent (repo['.']).
1626 directory) is compared against something other than its parent (repo['.']).
1630 """
1627 """
1631 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1628 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1632 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1629 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1633 # might have accidentally ended up with the entire contents of the file
1630 # might have accidentally ended up with the entire contents of the file
1634 # they are supposed to be linking to.
1631 # they are supposed to be linking to.
1635 s.modified[:] = self._filtersuspectsymlink(s.modified)
1632 s.modified[:] = self._filtersuspectsymlink(s.modified)
1636 if other != self._repo['.']:
1633 if other != self._repo['.']:
1637 s = super(workingctx, self)._buildstatus(other, s, match,
1634 s = super(workingctx, self)._buildstatus(other, s, match,
1638 listignored, listclean,
1635 listignored, listclean,
1639 listunknown)
1636 listunknown)
1640 return s
1637 return s
1641
1638
1642 def _matchstatus(self, other, match):
1639 def _matchstatus(self, other, match):
1643 """override the match method with a filter for directory patterns
1640 """override the match method with a filter for directory patterns
1644
1641
1645 We use inheritance to customize the match.bad method only for
1642 We use inheritance to customize the match.bad method only for
1646 workingctx, since it applies only to the working directory when
1643 workingctx, since it applies only to the working directory when
1647 comparing against the parent changeset.
1644 comparing against the parent changeset.
1648
1645
1649 If we aren't comparing against the working directory's parent, then we
1646 If we aren't comparing against the working directory's parent, then we
1650 just use the default match object sent to us.
1647 just use the default match object sent to us.
1651 """
1648 """
1652 superself = super(workingctx, self)
1649 superself = super(workingctx, self)
1653 match = superself._matchstatus(other, match)
1650 match = superself._matchstatus(other, match)
1654 if other != self._repo['.']:
1651 if other != self._repo['.']:
1655 def bad(f, msg):
1652 def bad(f, msg):
1656 # 'f' may be a directory pattern from 'match.files()',
1653 # 'f' may be a directory pattern from 'match.files()',
1657 # so 'f not in ctx1' is not enough
1654 # so 'f not in ctx1' is not enough
1658 if f not in other and not other.hasdir(f):
1655 if f not in other and not other.hasdir(f):
1659 self._repo.ui.warn('%s: %s\n' %
1656 self._repo.ui.warn('%s: %s\n' %
1660 (self._repo.dirstate.pathto(f), msg))
1657 (self._repo.dirstate.pathto(f), msg))
1661 match.bad = bad
1658 match.bad = bad
1662 return match
1659 return match
1663
1660
1664 class committablefilectx(basefilectx):
1661 class committablefilectx(basefilectx):
1665 """A committablefilectx provides common functionality for a file context
1662 """A committablefilectx provides common functionality for a file context
1666 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1663 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1667 def __init__(self, repo, path, filelog=None, ctx=None):
1664 def __init__(self, repo, path, filelog=None, ctx=None):
1668 self._repo = repo
1665 self._repo = repo
1669 self._path = path
1666 self._path = path
1670 self._changeid = None
1667 self._changeid = None
1671 self._filerev = self._filenode = None
1668 self._filerev = self._filenode = None
1672
1669
1673 if filelog is not None:
1670 if filelog is not None:
1674 self._filelog = filelog
1671 self._filelog = filelog
1675 if ctx:
1672 if ctx:
1676 self._changectx = ctx
1673 self._changectx = ctx
1677
1674
1678 def __nonzero__(self):
1675 def __nonzero__(self):
1679 return True
1676 return True
1680
1677
1681 def linkrev(self):
1678 def linkrev(self):
1682 # linked to self._changectx no matter if file is modified or not
1679 # linked to self._changectx no matter if file is modified or not
1683 return self.rev()
1680 return self.rev()
1684
1681
1685 def parents(self):
1682 def parents(self):
1686 '''return parent filectxs, following copies if necessary'''
1683 '''return parent filectxs, following copies if necessary'''
1687 def filenode(ctx, path):
1684 def filenode(ctx, path):
1688 return ctx._manifest.get(path, nullid)
1685 return ctx._manifest.get(path, nullid)
1689
1686
1690 path = self._path
1687 path = self._path
1691 fl = self._filelog
1688 fl = self._filelog
1692 pcl = self._changectx._parents
1689 pcl = self._changectx._parents
1693 renamed = self.renamed()
1690 renamed = self.renamed()
1694
1691
1695 if renamed:
1692 if renamed:
1696 pl = [renamed + (None,)]
1693 pl = [renamed + (None,)]
1697 else:
1694 else:
1698 pl = [(path, filenode(pcl[0], path), fl)]
1695 pl = [(path, filenode(pcl[0], path), fl)]
1699
1696
1700 for pc in pcl[1:]:
1697 for pc in pcl[1:]:
1701 pl.append((path, filenode(pc, path), fl))
1698 pl.append((path, filenode(pc, path), fl))
1702
1699
1703 return [self._parentfilectx(p, fileid=n, filelog=l)
1700 return [self._parentfilectx(p, fileid=n, filelog=l)
1704 for p, n, l in pl if n != nullid]
1701 for p, n, l in pl if n != nullid]
1705
1702
1706 def children(self):
1703 def children(self):
1707 return []
1704 return []
1708
1705
1709 class workingfilectx(committablefilectx):
1706 class workingfilectx(committablefilectx):
1710 """A workingfilectx object makes access to data related to a particular
1707 """A workingfilectx object makes access to data related to a particular
1711 file in the working directory convenient."""
1708 file in the working directory convenient."""
1712 def __init__(self, repo, path, filelog=None, workingctx=None):
1709 def __init__(self, repo, path, filelog=None, workingctx=None):
1713 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1710 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1714
1711
1715 @propertycache
1712 @propertycache
1716 def _changectx(self):
1713 def _changectx(self):
1717 return workingctx(self._repo)
1714 return workingctx(self._repo)
1718
1715
1719 def data(self):
1716 def data(self):
1720 return self._repo.wread(self._path)
1717 return self._repo.wread(self._path)
1721 def renamed(self):
1718 def renamed(self):
1722 rp = self._repo.dirstate.copied(self._path)
1719 rp = self._repo.dirstate.copied(self._path)
1723 if not rp:
1720 if not rp:
1724 return None
1721 return None
1725 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1722 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1726
1723
1727 def size(self):
1724 def size(self):
1728 return self._repo.wvfs.lstat(self._path).st_size
1725 return self._repo.wvfs.lstat(self._path).st_size
1729 def date(self):
1726 def date(self):
1730 t, tz = self._changectx.date()
1727 t, tz = self._changectx.date()
1731 try:
1728 try:
1732 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1729 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1733 except OSError as err:
1730 except OSError as err:
1734 if err.errno != errno.ENOENT:
1731 if err.errno != errno.ENOENT:
1735 raise
1732 raise
1736 return (t, tz)
1733 return (t, tz)
1737
1734
1738 def cmp(self, fctx):
1735 def cmp(self, fctx):
1739 """compare with other file context
1736 """compare with other file context
1740
1737
1741 returns True if different than fctx.
1738 returns True if different than fctx.
1742 """
1739 """
1743 # fctx should be a filectx (not a workingfilectx)
1740 # fctx should be a filectx (not a workingfilectx)
1744 # invert comparison to reuse the same code path
1741 # invert comparison to reuse the same code path
1745 return fctx.cmp(self)
1742 return fctx.cmp(self)
1746
1743
1747 def remove(self, ignoremissing=False):
1744 def remove(self, ignoremissing=False):
1748 """wraps unlink for a repo's working directory"""
1745 """wraps unlink for a repo's working directory"""
1749 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1746 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1750
1747
1751 def write(self, data, flags):
1748 def write(self, data, flags):
1752 """wraps repo.wwrite"""
1749 """wraps repo.wwrite"""
1753 self._repo.wwrite(self._path, data, flags)
1750 self._repo.wwrite(self._path, data, flags)
1754
1751
1755 class workingcommitctx(workingctx):
1752 class workingcommitctx(workingctx):
1756 """A workingcommitctx object makes access to data related to
1753 """A workingcommitctx object makes access to data related to
1757 the revision being committed convenient.
1754 the revision being committed convenient.
1758
1755
1759 This hides changes in the working directory, if they aren't
1756 This hides changes in the working directory, if they aren't
1760 committed in this context.
1757 committed in this context.
1761 """
1758 """
1762 def __init__(self, repo, changes,
1759 def __init__(self, repo, changes,
1763 text="", user=None, date=None, extra=None):
1760 text="", user=None, date=None, extra=None):
1764 super(workingctx, self).__init__(repo, text, user, date, extra,
1761 super(workingctx, self).__init__(repo, text, user, date, extra,
1765 changes)
1762 changes)
1766
1763
1767 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1764 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1768 unknown=False):
1765 unknown=False):
1769 """Return matched files only in ``self._status``
1766 """Return matched files only in ``self._status``
1770
1767
1771 Uncommitted files appear "clean" via this context, even if
1768 Uncommitted files appear "clean" via this context, even if
1772 they aren't actually so in the working directory.
1769 they aren't actually so in the working directory.
1773 """
1770 """
1774 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1771 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1775 if clean:
1772 if clean:
1776 clean = [f for f in self._manifest if f not in self._changedset]
1773 clean = [f for f in self._manifest if f not in self._changedset]
1777 else:
1774 else:
1778 clean = []
1775 clean = []
1779 return scmutil.status([f for f in self._status.modified if match(f)],
1776 return scmutil.status([f for f in self._status.modified if match(f)],
1780 [f for f in self._status.added if match(f)],
1777 [f for f in self._status.added if match(f)],
1781 [f for f in self._status.removed if match(f)],
1778 [f for f in self._status.removed if match(f)],
1782 [], [], [], clean)
1779 [], [], [], clean)
1783
1780
1784 @propertycache
1781 @propertycache
1785 def _changedset(self):
1782 def _changedset(self):
1786 """Return the set of files changed in this context
1783 """Return the set of files changed in this context
1787 """
1784 """
1788 changed = set(self._status.modified)
1785 changed = set(self._status.modified)
1789 changed.update(self._status.added)
1786 changed.update(self._status.added)
1790 changed.update(self._status.removed)
1787 changed.update(self._status.removed)
1791 return changed
1788 return changed
1792
1789
1793 class memctx(committablectx):
1790 class memctx(committablectx):
1794 """Use memctx to perform in-memory commits via localrepo.commitctx().
1791 """Use memctx to perform in-memory commits via localrepo.commitctx().
1795
1792
1796 Revision information is supplied at initialization time while
1793 Revision information is supplied at initialization time while
1797 related files data and is made available through a callback
1794 related files data and is made available through a callback
1798 mechanism. 'repo' is the current localrepo, 'parents' is a
1795 mechanism. 'repo' is the current localrepo, 'parents' is a
1799 sequence of two parent revisions identifiers (pass None for every
1796 sequence of two parent revisions identifiers (pass None for every
1800 missing parent), 'text' is the commit message and 'files' lists
1797 missing parent), 'text' is the commit message and 'files' lists
1801 names of files touched by the revision (normalized and relative to
1798 names of files touched by the revision (normalized and relative to
1802 repository root).
1799 repository root).
1803
1800
1804 filectxfn(repo, memctx, path) is a callable receiving the
1801 filectxfn(repo, memctx, path) is a callable receiving the
1805 repository, the current memctx object and the normalized path of
1802 repository, the current memctx object and the normalized path of
1806 requested file, relative to repository root. It is fired by the
1803 requested file, relative to repository root. It is fired by the
1807 commit function for every file in 'files', but calls order is
1804 commit function for every file in 'files', but calls order is
1808 undefined. If the file is available in the revision being
1805 undefined. If the file is available in the revision being
1809 committed (updated or added), filectxfn returns a memfilectx
1806 committed (updated or added), filectxfn returns a memfilectx
1810 object. If the file was removed, filectxfn raises an
1807 object. If the file was removed, filectxfn raises an
1811 IOError. Moved files are represented by marking the source file
1808 IOError. Moved files are represented by marking the source file
1812 removed and the new file added with copy information (see
1809 removed and the new file added with copy information (see
1813 memfilectx).
1810 memfilectx).
1814
1811
1815 user receives the committer name and defaults to current
1812 user receives the committer name and defaults to current
1816 repository username, date is the commit date in any format
1813 repository username, date is the commit date in any format
1817 supported by util.parsedate() and defaults to current date, extra
1814 supported by util.parsedate() and defaults to current date, extra
1818 is a dictionary of metadata or is left empty.
1815 is a dictionary of metadata or is left empty.
1819 """
1816 """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

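    # Note on the hashing above (a rough sketch, assuming the upstream
    # revlog.hash implementation): the node recorded for a file is the sha1
    # of the two parent nodes (sorted) followed by the file data, roughly:
    #
    #     p_min, p_max = sorted([p1node, p2node])
    #     node = hashlib.sha1(p_min + p_max + data).digest()
    #
    # so identical contents with identical parents map to the same node.
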
    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # always explicitly initialized as a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

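# A hedged usage sketch (not part of the original module) for the memctx API
# documented above: build and commit an in-memory revision from a plain dict
# of file contents.  ``repo`` is assumed to be an existing localrepo, ``p1``
# an existing changectx; the helper name, user string and dict layout are
# invented for illustration only.
def _example_memctx_commit(repo, p1, message, filedata):
    # ``filedata`` maps path -> bytes for files to create/update, or None for
    # files to remove in the new revision.
    def filectxfn(repo, memctx, path):
        data = filedata[path]
        if data is None:
            # Returning None marks the file as removed (the post-3.1
            # convention signalled by _returnnoneformissingfiles).
            return None
        return memfilectx(repo, path, data)
    ctx = memctx(repo,
                 parents=(p1.node(), None),
                 text=message,
                 files=sorted(filedata),
                 filectxfn=filectxfn,
                 user="example user <user@example.com>")
    # commit() hands the context to localrepo.commitctx() and returns the
    # node of the newly created changeset.
    return ctx.commit()
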
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
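
# A short illustrative sketch (not shipped with Mercurial) of how memfilectx
# records file metadata at construction time, per the __init__ above.  The
# paths and script contents are invented for illustration.
def _example_renamed_script(repo, memctx):
    fctx = memfilectx(repo, 'bin/run.sh', '#!/bin/sh\necho hi\n',
                      isexec=True, copied='scripts/run.sh', memctx=memctx)
    # flags() reflects islink/isexec: 'x' here, 'l' for symlinks, '' otherwise.
    assert fctx.flags() == 'x'
    # renamed() exposes the copy source recorded at construction (paired with
    # nullid, since the source file node is not tracked here).
    assert fctx.renamed() == ('scripts/run.sh', nullid)
    return fctx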