adjustlinkrev: remove unnecessary parameters...
Jun Wu
r30275:e81d72b4 default
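The patch narrows _adjustlinkrev() so that the path, filelog and file node are taken from the filectx itself rather than passed in by the caller. As a rough sketch of the call-site difference visible in the hunks below (fctx stands for any basefilectx instance and srcrev for the changeset revision the search starts from):

    # before this change: callers threaded per-file state through explicitly
    fctx._adjustlinkrev(fctx._path, fctx._filelog, fctx._filenode, srcrev, inclusive=True)
    # after this change: the helper reads that state from self
    fctx._adjustlinkrev(srcrev, inclusive=True)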
@@ -1,1987 +1,1982 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand-in for new files in some uses of
42 # Phony node value to stand-in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
49 class basectx(object):
49 class basectx(object):
50 """A basectx object represents the common logic for its children:
50 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
51 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
52 workingctx: a context that represents the working directory and can
53 be committed,
53 be committed,
54 memctx: a context that represents changes in-memory and can also
54 memctx: a context that represents changes in-memory and can also
55 be committed."""
55 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
57 if isinstance(changeid, basectx):
58 return changeid
58 return changeid
59
59
60 o = super(basectx, cls).__new__(cls)
60 o = super(basectx, cls).__new__(cls)
61
61
62 o._repo = repo
62 o._repo = repo
63 o._rev = nullrev
63 o._rev = nullrev
64 o._node = nullid
64 o._node = nullid
65
65
66 return o
66 return o
67
67
68 def __str__(self):
68 def __str__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 def __int__(self):
71 def __int__(self):
72 return self.rev()
72 return self.rev()
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _manifestmatches(self, match, s):
95 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
96 """generate a new manifest filtered by the match argument
97
97
98 This method is for internal use only and mainly exists to provide an
98 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
99 object oriented way for other contexts to customize the manifest
100 generation.
100 generation.
101 """
101 """
102 return self.manifest().matches(match)
102 return self.manifest().matches(match)
103
103
104 def _matchstatus(self, other, match):
104 def _matchstatus(self, other, match):
105 """return match.always if match is none
105 """return match.always if match is none
106
106
107 This internal method provides a way for child objects to override the
107 This internal method provides a way for child objects to override the
108 match operator.
108 match operator.
109 """
109 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
111
112 def _buildstatus(self, other, s, match, listignored, listclean,
112 def _buildstatus(self, other, s, match, listignored, listclean,
113 listunknown):
113 listunknown):
114 """build a status with respect to another context"""
114 """build a status with respect to another context"""
115 # Load earliest manifest first for caching reasons. More specifically,
115 # Load earliest manifest first for caching reasons. More specifically,
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # 1000 and cache it so that when you read 1001, we just need to apply a
118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # delta to what's in the cache. So that's one full reconstruction + one
119 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta application.
120 # delta application.
121 if self.rev() is not None and self.rev() < other.rev():
121 if self.rev() is not None and self.rev() < other.rev():
122 self.manifest()
122 self.manifest()
123 mf1 = other._manifestmatches(match, s)
123 mf1 = other._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
125
125
126 modified, added = [], []
126 modified, added = [], []
127 removed = []
127 removed = []
128 clean = []
128 clean = []
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deletedset = set(deleted)
130 deletedset = set(deleted)
131 d = mf1.diff(mf2, clean=listclean)
131 d = mf1.diff(mf2, clean=listclean)
132 for fn, value in d.iteritems():
132 for fn, value in d.iteritems():
133 if fn in deletedset:
133 if fn in deletedset:
134 continue
134 continue
135 if value is None:
135 if value is None:
136 clean.append(fn)
136 clean.append(fn)
137 continue
137 continue
138 (node1, flag1), (node2, flag2) = value
138 (node1, flag1), (node2, flag2) = value
139 if node1 is None:
139 if node1 is None:
140 added.append(fn)
140 added.append(fn)
141 elif node2 is None:
141 elif node2 is None:
142 removed.append(fn)
142 removed.append(fn)
143 elif flag1 != flag2:
143 elif flag1 != flag2:
144 modified.append(fn)
144 modified.append(fn)
145 elif node2 != _newnode:
145 elif node2 != _newnode:
146 # When comparing files between two commits, we save time by
146 # When comparing files between two commits, we save time by
147 # not comparing the file contents when the nodeids differ.
147 # not comparing the file contents when the nodeids differ.
148 # Note that this means we incorrectly report a reverted change
148 # Note that this means we incorrectly report a reverted change
149 # to a file as a modification.
149 # to a file as a modification.
150 modified.append(fn)
150 modified.append(fn)
151 elif self[fn].cmp(other[fn]):
151 elif self[fn].cmp(other[fn]):
152 modified.append(fn)
152 modified.append(fn)
153 else:
153 else:
154 clean.append(fn)
154 clean.append(fn)
155
155
156 if removed:
156 if removed:
157 # need to filter files if they are already reported as removed
157 # need to filter files if they are already reported as removed
158 unknown = [fn for fn in unknown if fn not in mf1]
158 unknown = [fn for fn in unknown if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
160 # if they're deleted, don't report them as removed
160 # if they're deleted, don't report them as removed
161 removed = [fn for fn in removed if fn not in deletedset]
161 removed = [fn for fn in removed if fn not in deletedset]
162
162
163 return scmutil.status(modified, added, removed, deleted, unknown,
163 return scmutil.status(modified, added, removed, deleted, unknown,
164 ignored, clean)
164 ignored, clean)
165
165
166 @propertycache
166 @propertycache
167 def substate(self):
167 def substate(self):
168 return subrepo.state(self, self._repo.ui)
168 return subrepo.state(self, self._repo.ui)
169
169
170 def subrev(self, subpath):
170 def subrev(self, subpath):
171 return self.substate[subpath][1]
171 return self.substate[subpath][1]
172
172
173 def rev(self):
173 def rev(self):
174 return self._rev
174 return self._rev
175 def node(self):
175 def node(self):
176 return self._node
176 return self._node
177 def hex(self):
177 def hex(self):
178 return hex(self.node())
178 return hex(self.node())
179 def manifest(self):
179 def manifest(self):
180 return self._manifest
180 return self._manifest
181 def repo(self):
181 def repo(self):
182 return self._repo
182 return self._repo
183 def phasestr(self):
183 def phasestr(self):
184 return phases.phasenames[self.phase()]
184 return phases.phasenames[self.phase()]
185 def mutable(self):
185 def mutable(self):
186 return self.phase() > phases.public
186 return self.phase() > phases.public
187
187
188 def getfileset(self, expr):
188 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
189 return fileset.getfileset(self, expr)
190
190
191 def obsolete(self):
191 def obsolete(self):
192 """True if the changeset is obsolete"""
192 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
194
195 def extinct(self):
195 def extinct(self):
196 """True if the changeset is extinct"""
196 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
198
199 def unstable(self):
199 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
200 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
202
203 def bumped(self):
203 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
204 """True if the changeset try to be a successor of a public changeset
205
205
206 Only non-public and non-obsolete changesets may be bumped.
206 Only non-public and non-obsolete changesets may be bumped.
207 """
207 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
209
210 def divergent(self):
210 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
211 """Is a successors of a changeset with multiple possible successors set
212
212
213 Only non-public and non-obsolete changesets may be divergent.
213 Only non-public and non-obsolete changesets may be divergent.
214 """
214 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
216
217 def troubled(self):
217 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
218 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
219 return self.unstable() or self.bumped() or self.divergent()
220
220
221 def troubles(self):
221 def troubles(self):
222 """return the list of troubles affecting this changesets.
222 """return the list of troubles affecting this changesets.
223
223
224 Troubles are returned as strings. possible values are:
224 Troubles are returned as strings. possible values are:
225 - unstable,
225 - unstable,
226 - bumped,
226 - bumped,
227 - divergent.
227 - divergent.
228 """
228 """
229 troubles = []
229 troubles = []
230 if self.unstable():
230 if self.unstable():
231 troubles.append('unstable')
231 troubles.append('unstable')
232 if self.bumped():
232 if self.bumped():
233 troubles.append('bumped')
233 troubles.append('bumped')
234 if self.divergent():
234 if self.divergent():
235 troubles.append('divergent')
235 troubles.append('divergent')
236 return troubles
236 return troubles
237
237
238 def parents(self):
238 def parents(self):
239 """return contexts for each parent changeset"""
239 """return contexts for each parent changeset"""
240 return self._parents
240 return self._parents
241
241
242 def p1(self):
242 def p1(self):
243 return self._parents[0]
243 return self._parents[0]
244
244
245 def p2(self):
245 def p2(self):
246 parents = self._parents
246 parents = self._parents
247 if len(parents) == 2:
247 if len(parents) == 2:
248 return parents[1]
248 return parents[1]
249 return changectx(self._repo, nullrev)
249 return changectx(self._repo, nullrev)
250
250
251 def _fileinfo(self, path):
251 def _fileinfo(self, path):
252 if '_manifest' in self.__dict__:
252 if '_manifest' in self.__dict__:
253 try:
253 try:
254 return self._manifest[path], self._manifest.flags(path)
254 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
255 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
256 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
257 _('not found in manifest'))
258 if '_manifestdelta' in self.__dict__ or path in self.files():
258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
259 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
260 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
261 self._manifestdelta.flags(path))
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
263 if not node:
263 if not node:
264 raise error.ManifestLookupError(self._node, path,
264 raise error.ManifestLookupError(self._node, path,
265 _('not found in manifest'))
265 _('not found in manifest'))
266
266
267 return node, flag
267 return node, flag
268
268
269 def filenode(self, path):
269 def filenode(self, path):
270 return self._fileinfo(path)[0]
270 return self._fileinfo(path)[0]
271
271
272 def flags(self, path):
272 def flags(self, path):
273 try:
273 try:
274 return self._fileinfo(path)[1]
274 return self._fileinfo(path)[1]
275 except error.LookupError:
275 except error.LookupError:
276 return ''
276 return ''
277
277
278 def sub(self, path, allowcreate=True):
278 def sub(self, path, allowcreate=True):
279 '''return a subrepo for the stored revision of path, never wdir()'''
279 '''return a subrepo for the stored revision of path, never wdir()'''
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
281
281
282 def nullsub(self, path, pctx):
282 def nullsub(self, path, pctx):
283 return subrepo.nullsubrepo(self, path, pctx)
283 return subrepo.nullsubrepo(self, path, pctx)
284
284
285 def workingsub(self, path):
285 def workingsub(self, path):
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 context.
287 context.
288 '''
288 '''
289 return subrepo.subrepo(self, path, allowwdir=True)
289 return subrepo.subrepo(self, path, allowwdir=True)
290
290
291 def match(self, pats=[], include=None, exclude=None, default='glob',
291 def match(self, pats=[], include=None, exclude=None, default='glob',
292 listsubrepos=False, badfn=None):
292 listsubrepos=False, badfn=None):
293 r = self._repo
293 r = self._repo
294 return matchmod.match(r.root, r.getcwd(), pats,
294 return matchmod.match(r.root, r.getcwd(), pats,
295 include, exclude, default,
295 include, exclude, default,
296 auditor=r.nofsauditor, ctx=self,
296 auditor=r.nofsauditor, ctx=self,
297 listsubrepos=listsubrepos, badfn=badfn)
297 listsubrepos=listsubrepos, badfn=badfn)
298
298
299 def diff(self, ctx2=None, match=None, **opts):
299 def diff(self, ctx2=None, match=None, **opts):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
305 diffopts = patch.diffopts(self._repo.ui, opts)
305 diffopts = patch.diffopts(self._repo.ui, opts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307
307
308 def dirs(self):
308 def dirs(self):
309 return self._manifest.dirs()
309 return self._manifest.dirs()
310
310
311 def hasdir(self, dir):
311 def hasdir(self, dir):
312 return self._manifest.hasdir(dir)
312 return self._manifest.hasdir(dir)
313
313
314 def dirty(self, missing=False, merge=True, branch=True):
314 def dirty(self, missing=False, merge=True, branch=True):
315 return False
315 return False
316
316
317 def status(self, other=None, match=None, listignored=False,
317 def status(self, other=None, match=None, listignored=False,
318 listclean=False, listunknown=False, listsubrepos=False):
318 listclean=False, listunknown=False, listsubrepos=False):
319 """return status of files between two nodes or node and working
319 """return status of files between two nodes or node and working
320 directory.
320 directory.
321
321
322 If other is None, compare this node with working directory.
322 If other is None, compare this node with working directory.
323
323
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 """
325 """
326
326
327 ctx1 = self
327 ctx1 = self
328 ctx2 = self._repo[other]
328 ctx2 = self._repo[other]
329
329
330 # This next code block is, admittedly, fragile logic that tests for
330 # This next code block is, admittedly, fragile logic that tests for
331 # reversing the contexts and wouldn't need to exist if it weren't for
331 # reversing the contexts and wouldn't need to exist if it weren't for
332 # the fast (and common) code path of comparing the working directory
332 # the fast (and common) code path of comparing the working directory
333 # with its first parent.
333 # with its first parent.
334 #
334 #
335 # What we're aiming for here is the ability to call:
335 # What we're aiming for here is the ability to call:
336 #
336 #
337 # workingctx.status(parentctx)
337 # workingctx.status(parentctx)
338 #
338 #
339 # If we always built the manifest for each context and compared those,
339 # If we always built the manifest for each context and compared those,
340 # then we'd be done. But the special case of the above call means we
340 # then we'd be done. But the special case of the above call means we
341 # just copy the manifest of the parent.
341 # just copy the manifest of the parent.
342 reversed = False
342 reversed = False
343 if (not isinstance(ctx1, changectx)
343 if (not isinstance(ctx1, changectx)
344 and isinstance(ctx2, changectx)):
344 and isinstance(ctx2, changectx)):
345 reversed = True
345 reversed = True
346 ctx1, ctx2 = ctx2, ctx1
346 ctx1, ctx2 = ctx2, ctx1
347
347
348 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
351 listunknown)
352
352
353 if reversed:
353 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
355 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
357 r.clean)
358
358
359 if listsubrepos:
359 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
361 try:
362 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
363 except KeyError:
363 except KeyError:
364 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do is ignore it.
366 # won't contain that subpath. The best we can do is ignore it.
367 rev2 = None
367 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
371 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
374
375 for l in r:
375 for l in r:
376 l.sort()
376 l.sort()
377
377
378 return r
378 return r
379
379
380
380
381 def makememctx(repo, parents, text, user, date, branch, files, store,
381 def makememctx(repo, parents, text, user, date, branch, files, store,
382 editor=None, extra=None):
382 editor=None, extra=None):
383 def getfilectx(repo, memctx, path):
383 def getfilectx(repo, memctx, path):
384 data, mode, copied = store.getfile(path)
384 data, mode, copied = store.getfile(path)
385 if data is None:
385 if data is None:
386 return None
386 return None
387 islink, isexec = mode
387 islink, isexec = mode
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 copied=copied, memctx=memctx)
389 copied=copied, memctx=memctx)
390 if extra is None:
390 if extra is None:
391 extra = {}
391 extra = {}
392 if branch:
392 if branch:
393 extra['branch'] = encoding.fromlocal(branch)
393 extra['branch'] = encoding.fromlocal(branch)
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 date, extra, editor)
395 date, extra, editor)
396 return ctx
396 return ctx
397
397
398 class changectx(basectx):
398 class changectx(basectx):
399 """A changecontext object makes access to data related to a particular
399 """A changecontext object makes access to data related to a particular
400 changeset convenient. It represents a read-only context already present in
400 changeset convenient. It represents a read-only context already present in
401 the repo."""
401 the repo."""
402 def __init__(self, repo, changeid=''):
402 def __init__(self, repo, changeid=''):
403 """changeid is a revision number, node, or tag"""
403 """changeid is a revision number, node, or tag"""
404
404
405 # since basectx.__new__ already took care of copying the object, we
405 # since basectx.__new__ already took care of copying the object, we
406 # don't need to do anything in __init__, so we just exit here
406 # don't need to do anything in __init__, so we just exit here
407 if isinstance(changeid, basectx):
407 if isinstance(changeid, basectx):
408 return
408 return
409
409
410 if changeid == '':
410 if changeid == '':
411 changeid = '.'
411 changeid = '.'
412 self._repo = repo
412 self._repo = repo
413
413
414 try:
414 try:
415 if isinstance(changeid, int):
415 if isinstance(changeid, int):
416 self._node = repo.changelog.node(changeid)
416 self._node = repo.changelog.node(changeid)
417 self._rev = changeid
417 self._rev = changeid
418 return
418 return
419 if isinstance(changeid, long):
419 if isinstance(changeid, long):
420 changeid = str(changeid)
420 changeid = str(changeid)
421 if changeid == 'null':
421 if changeid == 'null':
422 self._node = nullid
422 self._node = nullid
423 self._rev = nullrev
423 self._rev = nullrev
424 return
424 return
425 if changeid == 'tip':
425 if changeid == 'tip':
426 self._node = repo.changelog.tip()
426 self._node = repo.changelog.tip()
427 self._rev = repo.changelog.rev(self._node)
427 self._rev = repo.changelog.rev(self._node)
428 return
428 return
429 if changeid == '.' or changeid == repo.dirstate.p1():
429 if changeid == '.' or changeid == repo.dirstate.p1():
430 # this is a hack to delay/avoid loading obsmarkers
430 # this is a hack to delay/avoid loading obsmarkers
431 # when we know that '.' won't be hidden
431 # when we know that '.' won't be hidden
432 self._node = repo.dirstate.p1()
432 self._node = repo.dirstate.p1()
433 self._rev = repo.unfiltered().changelog.rev(self._node)
433 self._rev = repo.unfiltered().changelog.rev(self._node)
434 return
434 return
435 if len(changeid) == 20:
435 if len(changeid) == 20:
436 try:
436 try:
437 self._node = changeid
437 self._node = changeid
438 self._rev = repo.changelog.rev(changeid)
438 self._rev = repo.changelog.rev(changeid)
439 return
439 return
440 except error.FilteredRepoLookupError:
440 except error.FilteredRepoLookupError:
441 raise
441 raise
442 except LookupError:
442 except LookupError:
443 pass
443 pass
444
444
445 try:
445 try:
446 r = int(changeid)
446 r = int(changeid)
447 if str(r) != changeid:
447 if str(r) != changeid:
448 raise ValueError
448 raise ValueError
449 l = len(repo.changelog)
449 l = len(repo.changelog)
450 if r < 0:
450 if r < 0:
451 r += l
451 r += l
452 if r < 0 or r >= l:
452 if r < 0 or r >= l:
453 raise ValueError
453 raise ValueError
454 self._rev = r
454 self._rev = r
455 self._node = repo.changelog.node(r)
455 self._node = repo.changelog.node(r)
456 return
456 return
457 except error.FilteredIndexError:
457 except error.FilteredIndexError:
458 raise
458 raise
459 except (ValueError, OverflowError, IndexError):
459 except (ValueError, OverflowError, IndexError):
460 pass
460 pass
461
461
462 if len(changeid) == 40:
462 if len(changeid) == 40:
463 try:
463 try:
464 self._node = bin(changeid)
464 self._node = bin(changeid)
465 self._rev = repo.changelog.rev(self._node)
465 self._rev = repo.changelog.rev(self._node)
466 return
466 return
467 except error.FilteredLookupError:
467 except error.FilteredLookupError:
468 raise
468 raise
469 except (TypeError, LookupError):
469 except (TypeError, LookupError):
470 pass
470 pass
471
471
472 # lookup bookmarks through the name interface
472 # lookup bookmarks through the name interface
473 try:
473 try:
474 self._node = repo.names.singlenode(repo, changeid)
474 self._node = repo.names.singlenode(repo, changeid)
475 self._rev = repo.changelog.rev(self._node)
475 self._rev = repo.changelog.rev(self._node)
476 return
476 return
477 except KeyError:
477 except KeyError:
478 pass
478 pass
479 except error.FilteredRepoLookupError:
479 except error.FilteredRepoLookupError:
480 raise
480 raise
481 except error.RepoLookupError:
481 except error.RepoLookupError:
482 pass
482 pass
483
483
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 if self._node is not None:
485 if self._node is not None:
486 self._rev = repo.changelog.rev(self._node)
486 self._rev = repo.changelog.rev(self._node)
487 return
487 return
488
488
489 # lookup failed
489 # lookup failed
490 # check if it might have come from damaged dirstate
490 # check if it might have come from damaged dirstate
491 #
491 #
492 # XXX we could avoid the unfiltered if we had a recognizable
492 # XXX we could avoid the unfiltered if we had a recognizable
493 # exception for filtered changeset access
493 # exception for filtered changeset access
494 if changeid in repo.unfiltered().dirstate.parents():
494 if changeid in repo.unfiltered().dirstate.parents():
495 msg = _("working directory has unknown parent '%s'!")
495 msg = _("working directory has unknown parent '%s'!")
496 raise error.Abort(msg % short(changeid))
496 raise error.Abort(msg % short(changeid))
497 try:
497 try:
498 if len(changeid) == 20 and nonascii(changeid):
498 if len(changeid) == 20 and nonascii(changeid):
499 changeid = hex(changeid)
499 changeid = hex(changeid)
500 except TypeError:
500 except TypeError:
501 pass
501 pass
502 except (error.FilteredIndexError, error.FilteredLookupError,
502 except (error.FilteredIndexError, error.FilteredLookupError,
503 error.FilteredRepoLookupError):
503 error.FilteredRepoLookupError):
504 if repo.filtername.startswith('visible'):
504 if repo.filtername.startswith('visible'):
505 msg = _("hidden revision '%s'") % changeid
505 msg = _("hidden revision '%s'") % changeid
506 hint = _('use --hidden to access hidden revisions')
506 hint = _('use --hidden to access hidden revisions')
507 raise error.FilteredRepoLookupError(msg, hint=hint)
507 raise error.FilteredRepoLookupError(msg, hint=hint)
508 msg = _("filtered revision '%s' (not in '%s' subset)")
508 msg = _("filtered revision '%s' (not in '%s' subset)")
509 msg %= (changeid, repo.filtername)
509 msg %= (changeid, repo.filtername)
510 raise error.FilteredRepoLookupError(msg)
510 raise error.FilteredRepoLookupError(msg)
511 except IndexError:
511 except IndexError:
512 pass
512 pass
513 raise error.RepoLookupError(
513 raise error.RepoLookupError(
514 _("unknown revision '%s'") % changeid)
514 _("unknown revision '%s'") % changeid)
515
515
516 def __hash__(self):
516 def __hash__(self):
517 try:
517 try:
518 return hash(self._rev)
518 return hash(self._rev)
519 except AttributeError:
519 except AttributeError:
520 return id(self)
520 return id(self)
521
521
522 def __nonzero__(self):
522 def __nonzero__(self):
523 return self._rev != nullrev
523 return self._rev != nullrev
524
524
525 @propertycache
525 @propertycache
526 def _changeset(self):
526 def _changeset(self):
527 return self._repo.changelog.changelogrevision(self.rev())
527 return self._repo.changelog.changelogrevision(self.rev())
528
528
529 @propertycache
529 @propertycache
530 def _manifest(self):
530 def _manifest(self):
531 return self._repo.manifestlog[self._changeset.manifest].read()
531 return self._repo.manifestlog[self._changeset.manifest].read()
532
532
533 @propertycache
533 @propertycache
534 def _manifestdelta(self):
534 def _manifestdelta(self):
535 mfnode = self._changeset.manifest
535 mfnode = self._changeset.manifest
536 return self._repo.manifestlog[mfnode].readdelta()
536 return self._repo.manifestlog[mfnode].readdelta()
537
537
538 @propertycache
538 @propertycache
539 def _parents(self):
539 def _parents(self):
540 repo = self._repo
540 repo = self._repo
541 p1, p2 = repo.changelog.parentrevs(self._rev)
541 p1, p2 = repo.changelog.parentrevs(self._rev)
542 if p2 == nullrev:
542 if p2 == nullrev:
543 return [changectx(repo, p1)]
543 return [changectx(repo, p1)]
544 return [changectx(repo, p1), changectx(repo, p2)]
544 return [changectx(repo, p1), changectx(repo, p2)]
545
545
546 def changeset(self):
546 def changeset(self):
547 c = self._changeset
547 c = self._changeset
548 return (
548 return (
549 c.manifest,
549 c.manifest,
550 c.user,
550 c.user,
551 c.date,
551 c.date,
552 c.files,
552 c.files,
553 c.description,
553 c.description,
554 c.extra,
554 c.extra,
555 )
555 )
556 def manifestnode(self):
556 def manifestnode(self):
557 return self._changeset.manifest
557 return self._changeset.manifest
558
558
559 def user(self):
559 def user(self):
560 return self._changeset.user
560 return self._changeset.user
561 def date(self):
561 def date(self):
562 return self._changeset.date
562 return self._changeset.date
563 def files(self):
563 def files(self):
564 return self._changeset.files
564 return self._changeset.files
565 def description(self):
565 def description(self):
566 return self._changeset.description
566 return self._changeset.description
567 def branch(self):
567 def branch(self):
568 return encoding.tolocal(self._changeset.extra.get("branch"))
568 return encoding.tolocal(self._changeset.extra.get("branch"))
569 def closesbranch(self):
569 def closesbranch(self):
570 return 'close' in self._changeset.extra
570 return 'close' in self._changeset.extra
571 def extra(self):
571 def extra(self):
572 return self._changeset.extra
572 return self._changeset.extra
573 def tags(self):
573 def tags(self):
574 return self._repo.nodetags(self._node)
574 return self._repo.nodetags(self._node)
575 def bookmarks(self):
575 def bookmarks(self):
576 return self._repo.nodebookmarks(self._node)
576 return self._repo.nodebookmarks(self._node)
577 def phase(self):
577 def phase(self):
578 return self._repo._phasecache.phase(self._repo, self._rev)
578 return self._repo._phasecache.phase(self._repo, self._rev)
579 def hidden(self):
579 def hidden(self):
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
581
581
582 def children(self):
582 def children(self):
583 """return contexts for each child changeset"""
583 """return contexts for each child changeset"""
584 c = self._repo.changelog.children(self._node)
584 c = self._repo.changelog.children(self._node)
585 return [changectx(self._repo, x) for x in c]
585 return [changectx(self._repo, x) for x in c]
586
586
587 def ancestors(self):
587 def ancestors(self):
588 for a in self._repo.changelog.ancestors([self._rev]):
588 for a in self._repo.changelog.ancestors([self._rev]):
589 yield changectx(self._repo, a)
589 yield changectx(self._repo, a)
590
590
591 def descendants(self):
591 def descendants(self):
592 for d in self._repo.changelog.descendants([self._rev]):
592 for d in self._repo.changelog.descendants([self._rev]):
593 yield changectx(self._repo, d)
593 yield changectx(self._repo, d)
594
594
595 def filectx(self, path, fileid=None, filelog=None):
595 def filectx(self, path, fileid=None, filelog=None):
596 """get a file context from this changeset"""
596 """get a file context from this changeset"""
597 if fileid is None:
597 if fileid is None:
598 fileid = self.filenode(path)
598 fileid = self.filenode(path)
599 return filectx(self._repo, path, fileid=fileid,
599 return filectx(self._repo, path, fileid=fileid,
600 changectx=self, filelog=filelog)
600 changectx=self, filelog=filelog)
601
601
602 def ancestor(self, c2, warn=False):
602 def ancestor(self, c2, warn=False):
603 """return the "best" ancestor context of self and c2
603 """return the "best" ancestor context of self and c2
604
604
605 If there are multiple candidates, it will show a message and check
605 If there are multiple candidates, it will show a message and check
606 merge.preferancestor configuration before falling back to the
606 merge.preferancestor configuration before falling back to the
607 revlog ancestor."""
607 revlog ancestor."""
608 # deal with workingctxs
608 # deal with workingctxs
609 n2 = c2._node
609 n2 = c2._node
610 if n2 is None:
610 if n2 is None:
611 n2 = c2._parents[0]._node
611 n2 = c2._parents[0]._node
612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
613 if not cahs:
613 if not cahs:
614 anc = nullid
614 anc = nullid
615 elif len(cahs) == 1:
615 elif len(cahs) == 1:
616 anc = cahs[0]
616 anc = cahs[0]
617 else:
617 else:
618 # experimental config: merge.preferancestor
618 # experimental config: merge.preferancestor
619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
620 try:
620 try:
621 ctx = changectx(self._repo, r)
621 ctx = changectx(self._repo, r)
622 except error.RepoLookupError:
622 except error.RepoLookupError:
623 continue
623 continue
624 anc = ctx.node()
624 anc = ctx.node()
625 if anc in cahs:
625 if anc in cahs:
626 break
626 break
627 else:
627 else:
628 anc = self._repo.changelog.ancestor(self._node, n2)
628 anc = self._repo.changelog.ancestor(self._node, n2)
629 if warn:
629 if warn:
630 self._repo.ui.status(
630 self._repo.ui.status(
631 (_("note: using %s as ancestor of %s and %s\n") %
631 (_("note: using %s as ancestor of %s and %s\n") %
632 (short(anc), short(self._node), short(n2))) +
632 (short(anc), short(self._node), short(n2))) +
633 ''.join(_(" alternatively, use --config "
633 ''.join(_(" alternatively, use --config "
634 "merge.preferancestor=%s\n") %
634 "merge.preferancestor=%s\n") %
635 short(n) for n in sorted(cahs) if n != anc))
635 short(n) for n in sorted(cahs) if n != anc))
636 return changectx(self._repo, anc)
636 return changectx(self._repo, anc)
637
637
638 def descendant(self, other):
638 def descendant(self, other):
639 """True if other is descendant of this changeset"""
639 """True if other is descendant of this changeset"""
640 return self._repo.changelog.descendant(self._rev, other._rev)
640 return self._repo.changelog.descendant(self._rev, other._rev)
641
641
642 def walk(self, match):
642 def walk(self, match):
643 '''Generates matching file names.'''
643 '''Generates matching file names.'''
644
644
645 # Wrap match.bad method to have message with nodeid
645 # Wrap match.bad method to have message with nodeid
646 def bad(fn, msg):
646 def bad(fn, msg):
647 # The manifest doesn't know about subrepos, so don't complain about
647 # The manifest doesn't know about subrepos, so don't complain about
648 # paths into valid subrepos.
648 # paths into valid subrepos.
649 if any(fn == s or fn.startswith(s + '/')
649 if any(fn == s or fn.startswith(s + '/')
650 for s in self.substate):
650 for s in self.substate):
651 return
651 return
652 match.bad(fn, _('no such file in rev %s') % self)
652 match.bad(fn, _('no such file in rev %s') % self)
653
653
654 m = matchmod.badmatch(match, bad)
654 m = matchmod.badmatch(match, bad)
655 return self._manifest.walk(m)
655 return self._manifest.walk(m)
656
656
657 def matches(self, match):
657 def matches(self, match):
658 return self.walk(match)
658 return self.walk(match)
659
659
660 class basefilectx(object):
660 class basefilectx(object):
661 """A filecontext object represents the common logic for its children:
661 """A filecontext object represents the common logic for its children:
662 filectx: read-only access to a filerevision that is already present
662 filectx: read-only access to a filerevision that is already present
663 in the repo,
663 in the repo,
664 workingfilectx: a filecontext that represents files from the working
664 workingfilectx: a filecontext that represents files from the working
665 directory,
665 directory,
666 memfilectx: a filecontext that represents files in-memory."""
666 memfilectx: a filecontext that represents files in-memory."""
667 def __new__(cls, repo, path, *args, **kwargs):
667 def __new__(cls, repo, path, *args, **kwargs):
668 return super(basefilectx, cls).__new__(cls)
668 return super(basefilectx, cls).__new__(cls)
669
669
670 @propertycache
670 @propertycache
671 def _filelog(self):
671 def _filelog(self):
672 return self._repo.file(self._path)
672 return self._repo.file(self._path)
673
673
674 @propertycache
674 @propertycache
675 def _changeid(self):
675 def _changeid(self):
676 if '_changeid' in self.__dict__:
676 if '_changeid' in self.__dict__:
677 return self._changeid
677 return self._changeid
678 elif '_changectx' in self.__dict__:
678 elif '_changectx' in self.__dict__:
679 return self._changectx.rev()
679 return self._changectx.rev()
680 elif '_descendantrev' in self.__dict__:
680 elif '_descendantrev' in self.__dict__:
681 # this file context was created from a revision with a known
681 # this file context was created from a revision with a known
682 # descendant, we can (lazily) correct for linkrev aliases
682 # descendant, we can (lazily) correct for linkrev aliases
- 683 return self._adjustlinkrev(self._path, self._filelog,
+ 683 return self._adjustlinkrev(self._descendantrev)
- 684 self._filenode, self._descendantrev)
685 else:
684 else:
686 return self._filelog.linkrev(self._filerev)
685 return self._filelog.linkrev(self._filerev)
687
686
688 @propertycache
687 @propertycache
689 def _filenode(self):
688 def _filenode(self):
690 if '_fileid' in self.__dict__:
689 if '_fileid' in self.__dict__:
691 return self._filelog.lookup(self._fileid)
690 return self._filelog.lookup(self._fileid)
692 else:
691 else:
693 return self._changectx.filenode(self._path)
692 return self._changectx.filenode(self._path)
694
693
695 @propertycache
694 @propertycache
696 def _filerev(self):
695 def _filerev(self):
697 return self._filelog.rev(self._filenode)
696 return self._filelog.rev(self._filenode)
698
697
699 @propertycache
698 @propertycache
700 def _repopath(self):
699 def _repopath(self):
701 return self._path
700 return self._path
702
701
703 def __nonzero__(self):
702 def __nonzero__(self):
704 try:
703 try:
705 self._filenode
704 self._filenode
706 return True
705 return True
707 except error.LookupError:
706 except error.LookupError:
708 # file is missing
707 # file is missing
709 return False
708 return False
710
709
711 def __str__(self):
710 def __str__(self):
712 try:
711 try:
713 return "%s@%s" % (self.path(), self._changectx)
712 return "%s@%s" % (self.path(), self._changectx)
714 except error.LookupError:
713 except error.LookupError:
715 return "%s@???" % self.path()
714 return "%s@???" % self.path()
716
715
717 def __repr__(self):
716 def __repr__(self):
718 return "<%s %s>" % (type(self).__name__, str(self))
717 return "<%s %s>" % (type(self).__name__, str(self))
719
718
720 def __hash__(self):
719 def __hash__(self):
721 try:
720 try:
722 return hash((self._path, self._filenode))
721 return hash((self._path, self._filenode))
723 except AttributeError:
722 except AttributeError:
724 return id(self)
723 return id(self)
725
724
726 def __eq__(self, other):
725 def __eq__(self, other):
727 try:
726 try:
728 return (type(self) == type(other) and self._path == other._path
727 return (type(self) == type(other) and self._path == other._path
729 and self._filenode == other._filenode)
728 and self._filenode == other._filenode)
730 except AttributeError:
729 except AttributeError:
731 return False
730 return False
732
731
733 def __ne__(self, other):
732 def __ne__(self, other):
734 return not (self == other)
733 return not (self == other)
735
734
736 def filerev(self):
735 def filerev(self):
737 return self._filerev
736 return self._filerev
738 def filenode(self):
737 def filenode(self):
739 return self._filenode
738 return self._filenode
740 def flags(self):
739 def flags(self):
741 return self._changectx.flags(self._path)
740 return self._changectx.flags(self._path)
742 def filelog(self):
741 def filelog(self):
743 return self._filelog
742 return self._filelog
744 def rev(self):
743 def rev(self):
745 return self._changeid
744 return self._changeid
746 def linkrev(self):
745 def linkrev(self):
747 return self._filelog.linkrev(self._filerev)
746 return self._filelog.linkrev(self._filerev)
748 def node(self):
747 def node(self):
749 return self._changectx.node()
748 return self._changectx.node()
750 def hex(self):
749 def hex(self):
751 return self._changectx.hex()
750 return self._changectx.hex()
752 def user(self):
751 def user(self):
753 return self._changectx.user()
752 return self._changectx.user()
754 def date(self):
753 def date(self):
755 return self._changectx.date()
754 return self._changectx.date()
756 def files(self):
755 def files(self):
757 return self._changectx.files()
756 return self._changectx.files()
758 def description(self):
757 def description(self):
759 return self._changectx.description()
758 return self._changectx.description()
760 def branch(self):
759 def branch(self):
761 return self._changectx.branch()
760 return self._changectx.branch()
762 def extra(self):
761 def extra(self):
763 return self._changectx.extra()
762 return self._changectx.extra()
764 def phase(self):
763 def phase(self):
765 return self._changectx.phase()
764 return self._changectx.phase()
766 def phasestr(self):
765 def phasestr(self):
767 return self._changectx.phasestr()
766 return self._changectx.phasestr()
768 def manifest(self):
767 def manifest(self):
769 return self._changectx.manifest()
768 return self._changectx.manifest()
770 def changectx(self):
769 def changectx(self):
771 return self._changectx
770 return self._changectx
772 def repo(self):
771 def repo(self):
773 return self._repo
772 return self._repo
774
773
775 def path(self):
774 def path(self):
776 return self._path
775 return self._path
777
776
778 def isbinary(self):
777 def isbinary(self):
779 try:
778 try:
780 return util.binary(self.data())
779 return util.binary(self.data())
781 except IOError:
780 except IOError:
782 return False
781 return False
783 def isexec(self):
782 def isexec(self):
784 return 'x' in self.flags()
783 return 'x' in self.flags()
785 def islink(self):
784 def islink(self):
786 return 'l' in self.flags()
785 return 'l' in self.flags()
787
786
788 def isabsent(self):
787 def isabsent(self):
789 """whether this filectx represents a file not in self._changectx
788 """whether this filectx represents a file not in self._changectx
790
789
791 This is mainly for merge code to detect change/delete conflicts. This is
790 This is mainly for merge code to detect change/delete conflicts. This is
792 expected to be True for all subclasses of basectx."""
791 expected to be True for all subclasses of basectx."""
793 return False
792 return False
794
793
795 _customcmp = False
794 _customcmp = False
796 def cmp(self, fctx):
795 def cmp(self, fctx):
797 """compare with other file context
796 """compare with other file context
798
797
799 returns True if different than fctx.
798 returns True if different than fctx.
800 """
799 """
801 if fctx._customcmp:
800 if fctx._customcmp:
802 return fctx.cmp(self)
801 return fctx.cmp(self)
803
802
804 if (fctx._filenode is None
803 if (fctx._filenode is None
805 and (self._repo._encodefilterpats
804 and (self._repo._encodefilterpats
806 # if file data starts with '\1\n', empty metadata block is
805 # if file data starts with '\1\n', empty metadata block is
807 # prepended, which adds 4 bytes to filelog.size().
806 # prepended, which adds 4 bytes to filelog.size().
808 or self.size() - 4 == fctx.size())
807 or self.size() - 4 == fctx.size())
809 or self.size() == fctx.size()):
808 or self.size() == fctx.size()):
810 return self._filelog.cmp(self._filenode, fctx.data())
809 return self._filelog.cmp(self._filenode, fctx.data())
811
810
812 return True
811 return True
813
812
- 814 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
+ 813 def _adjustlinkrev(self, srcrev, inclusive=False):
815 """return the first ancestor of <srcrev> introducing <fnode>
814 """return the first ancestor of <srcrev> introducing <fnode>
816
815
817 If the linkrev of the file revision does not point to an ancestor of
816 If the linkrev of the file revision does not point to an ancestor of
818 srcrev, we'll walk down the ancestors until we find one introducing
817 srcrev, we'll walk down the ancestors until we find one introducing
819 this file revision.
818 this file revision.
820
819
- 821 :repo: a localrepository object (used to access changelog and manifest)
- 822 :path: the file path
- 823 :fnode: the nodeid of the file revision
- 824 :filelog: the filelog of this path
825 :srcrev: the changeset revision we search ancestors from
820 :srcrev: the changeset revision we search ancestors from
826 :inclusive: if true, the src revision will also be checked
821 :inclusive: if true, the src revision will also be checked
827 """
822 """
828 repo = self._repo
823 repo = self._repo
829 cl = repo.unfiltered().changelog
824 cl = repo.unfiltered().changelog
830 mfl = repo.manifestlog
825 mfl = repo.manifestlog
831 # fetch the linkrev
826 # fetch the linkrev
- 832 fr = filelog.rev(fnode)
+ 827 lkr = self.linkrev()
- 833 lkr = filelog.linkrev(fr)
834 # hack to reuse ancestor computation when searching for renames
828 # hack to reuse ancestor computation when searching for renames
835 memberanc = getattr(self, '_ancestrycontext', None)
829 memberanc = getattr(self, '_ancestrycontext', None)
836 iteranc = None
830 iteranc = None
837 if srcrev is None:
831 if srcrev is None:
838 # wctx case, used by workingfilectx during mergecopy
832 # wctx case, used by workingfilectx during mergecopy
839 revs = [p.rev() for p in self._repo[None].parents()]
833 revs = [p.rev() for p in self._repo[None].parents()]
840 inclusive = True # we skipped the real (revless) source
834 inclusive = True # we skipped the real (revless) source
841 else:
835 else:
842 revs = [srcrev]
836 revs = [srcrev]
843 if memberanc is None:
837 if memberanc is None:
844 memberanc = iteranc = cl.ancestors(revs, lkr,
838 memberanc = iteranc = cl.ancestors(revs, lkr,
845 inclusive=inclusive)
839 inclusive=inclusive)
846 # check if this linkrev is an ancestor of srcrev
840 # check if this linkrev is an ancestor of srcrev
847 if lkr not in memberanc:
841 if lkr not in memberanc:
848 if iteranc is None:
842 if iteranc is None:
849 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
843 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
+ 844 fnode = self._filenode
+ 845 path = self._path
850 for a in iteranc:
846 for a in iteranc:
851 ac = cl.read(a) # get changeset data (we avoid object creation)
847 ac = cl.read(a) # get changeset data (we avoid object creation)
852 if path in ac[3]: # checking the 'files' field.
848 if path in ac[3]: # checking the 'files' field.
853 # The file has been touched, check if the content is
849 # The file has been touched, check if the content is
854 # similar to the one we search for.
850 # similar to the one we search for.
855 if fnode == mfl[ac[0]].readfast().get(path):
851 if fnode == mfl[ac[0]].readfast().get(path):
856 return a
852 return a
857 # In theory, we should never get out of that loop without a result.
853 # In theory, we should never get out of that loop without a result.
858 # But if manifest uses a buggy file revision (not children of the
854 # But if manifest uses a buggy file revision (not children of the
859 # one it replaces) we could. Such a buggy situation will likely
855 # one it replaces) we could. Such a buggy situation will likely
860 # result in a crash somewhere else at some point.
856 # result in a crash somewhere else at some point.
861 return lkr
857 return lkr
862
858
863 def introrev(self):
859 def introrev(self):
864 """return the rev of the changeset which introduced this file revision
860 """return the rev of the changeset which introduced this file revision
865
861
866 This method is different from linkrev because it takes into account the
862 This method is different from linkrev because it takes into account the
867 changeset the filectx was created from. It ensures the returned
863 changeset the filectx was created from. It ensures the returned
868 revision is one of its ancestors. This prevents bugs from
864 revision is one of its ancestors. This prevents bugs from
869 'linkrev-shadowing' when a file revision is used by multiple
865 'linkrev-shadowing' when a file revision is used by multiple
870 changesets.
866 changesets.
871 """
867 """
872 lkr = self.linkrev()
868 lkr = self.linkrev()
873 attrs = vars(self)
869 attrs = vars(self)
874 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
870 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
875 if noctx or self.rev() == lkr:
871 if noctx or self.rev() == lkr:
876 return self.linkrev()
872 return self.linkrev()
- 877 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
+ 873 return self._adjustlinkrev(self.rev(), inclusive=True)
- 878 self.rev(), inclusive=True)
879
874
880 def _parentfilectx(self, path, fileid, filelog):
875 def _parentfilectx(self, path, fileid, filelog):
881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
876 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
877 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 if '_changeid' in vars(self) or '_changectx' in vars(self):
878 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 # If self is associated with a changeset (probably explicitly
879 # If self is associated with a changeset (probably explicitly
885 # fed), ensure the created filectx is associated with a
880 # fed), ensure the created filectx is associated with a
886 # changeset that is an ancestor of self.changectx.
881 # changeset that is an ancestor of self.changectx.
887 # This lets us later use _adjustlinkrev to get a correct link.
882 # This lets us later use _adjustlinkrev to get a correct link.
888 fctx._descendantrev = self.rev()
883 fctx._descendantrev = self.rev()
889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
884 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 elif '_descendantrev' in vars(self):
885 elif '_descendantrev' in vars(self):
891 # Otherwise propagate _descendantrev if we have one associated.
886 # Otherwise propagate _descendantrev if we have one associated.
892 fctx._descendantrev = self._descendantrev
887 fctx._descendantrev = self._descendantrev
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
888 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 return fctx
889 return fctx
895
890
896 def parents(self):
891 def parents(self):
897 _path = self._path
892 _path = self._path
898 fl = self._filelog
893 fl = self._filelog
899 parents = self._filelog.parents(self._filenode)
894 parents = self._filelog.parents(self._filenode)
900 pl = [(_path, node, fl) for node in parents if node != nullid]
895 pl = [(_path, node, fl) for node in parents if node != nullid]
901
896
902 r = fl.renamed(self._filenode)
897 r = fl.renamed(self._filenode)
903 if r:
898 if r:
904 # - In the simple rename case, both parents are nullid, pl is empty.
899 # - In the simple rename case, both parents are nullid, pl is empty.
905 # - In case of merge, only one of the parents is nullid and should
900 # - In case of merge, only one of the parents is nullid and should
906 # be replaced with the rename information. This parent is -always-
901 # be replaced with the rename information. This parent is -always-
907 # the first one.
902 # the first one.
908 #
903 #
909 # As null id have always been filtered out in the previous list
904 # As null id have always been filtered out in the previous list
910 # comprehension, inserting to 0 will always result in "replacing
905 # comprehension, inserting to 0 will always result in "replacing
911 # first nullid parent with rename information".
906 # first nullid parent with rename information".
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
907 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913
908
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
909 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915
910
916 def p1(self):
911 def p1(self):
917 return self.parents()[0]
912 return self.parents()[0]
918
913
919 def p2(self):
914 def p2(self):
920 p = self.parents()
915 p = self.parents()
921 if len(p) == 2:
916 if len(p) == 2:
922 return p[1]
917 return p[1]
923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
918 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924
919
925 def annotate(self, follow=False, linenumber=False, diffopts=None):
920 def annotate(self, follow=False, linenumber=False, diffopts=None):
926 '''returns a list of ((ctx, number), line) tuples, one for each line
921 '''returns a list of ((ctx, number), line) tuples, one for each line
927 in the file, where ctx is the filectx of the node where
922 in the file, where ctx is the filectx of the node where
928 that line was last changed; if the linenumber parameter is true, number
923 that line was last changed; if the linenumber parameter is true, number
929 is the line number at the line's first appearance in the managed file,
924 is the line number at the line's first appearance in the managed file,
930 otherwise number has the fixed value False.
925 otherwise number has the fixed value False.
931 '''
926 '''
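# A minimal usage sketch (illustrative only; the repository, revision and
# file name below are hypothetical, not part of this module):
#
#   fctx = repo['tip']['foo.py']
#   for (octx, num), line in fctx.annotate(linenumber=True):
#       print '%d:%s: %s' % (octx.rev(), num, line),
#
# Each octx is the filectx that introduced the line, and num is the line
# number at its first appearance (False when linenumber=False).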
932
927
933 def lines(text):
928 def lines(text):
934 if text.endswith("\n"):
929 if text.endswith("\n"):
935 return text.count("\n")
930 return text.count("\n")
936 return text.count("\n") + int(bool(text))
931 return text.count("\n") + int(bool(text))
937
932
938 if linenumber:
933 if linenumber:
939 def decorate(text, rev):
934 def decorate(text, rev):
940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
935 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
941 else:
936 else:
942 def decorate(text, rev):
937 def decorate(text, rev):
943 return ([(rev, False)] * lines(text), text)
938 return ([(rev, False)] * lines(text), text)
944
939
945 def pair(parent, child):
940 def pair(parent, child):
946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
941 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
947 for (a1, a2, b1, b2), t in blocks:
942 for (a1, a2, b1, b2), t in blocks:
948 # Changed blocks ('!') or blocks made only of blank lines ('~')
943 # Changed blocks ('!') or blocks made only of blank lines ('~')
949 # belong to the child.
944 # belong to the child.
950 if t == '=':
945 if t == '=':
951 child[0][b1:b2] = parent[0][a1:a2]
946 child[0][b1:b2] = parent[0][a1:a2]
952 return child
947 return child
953
948
954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
949 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
955
950
956 def parents(f):
951 def parents(f):
957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
952 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
953 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
954 # from the topmost introrev (= srcrev) down to p.linkrev() if it
960 # isn't an ancestor of the srcrev.
955 # isn't an ancestor of the srcrev.
961 f._changeid
956 f._changeid
962 pl = f.parents()
957 pl = f.parents()
963
958
964 # Don't return renamed parents if we aren't following.
959 # Don't return renamed parents if we aren't following.
965 if not follow:
960 if not follow:
966 pl = [p for p in pl if p.path() == f.path()]
961 pl = [p for p in pl if p.path() == f.path()]
967
962
968 # renamed filectx won't have a filelog yet, so set it
963 # renamed filectx won't have a filelog yet, so set it
969 # from the cache to save time
964 # from the cache to save time
970 for p in pl:
965 for p in pl:
971 if not '_filelog' in p.__dict__:
966 if not '_filelog' in p.__dict__:
972 p._filelog = getlog(p.path())
967 p._filelog = getlog(p.path())
973
968
974 return pl
969 return pl
975
970
976 # use linkrev to find the first changeset where self appeared
971 # use linkrev to find the first changeset where self appeared
977 base = self
972 base = self
978 introrev = self.introrev()
973 introrev = self.introrev()
979 if self.rev() != introrev:
974 if self.rev() != introrev:
980 base = self.filectx(self.filenode(), changeid=introrev)
975 base = self.filectx(self.filenode(), changeid=introrev)
981 if getattr(base, '_ancestrycontext', None) is None:
976 if getattr(base, '_ancestrycontext', None) is None:
982 cl = self._repo.changelog
977 cl = self._repo.changelog
983 if introrev is None:
978 if introrev is None:
984 # wctx is not inclusive, but works because _ancestrycontext
979 # wctx is not inclusive, but works because _ancestrycontext
985 # is used to test filelog revisions
980 # is used to test filelog revisions
986 ac = cl.ancestors([p.rev() for p in base.parents()],
981 ac = cl.ancestors([p.rev() for p in base.parents()],
987 inclusive=True)
982 inclusive=True)
988 else:
983 else:
989 ac = cl.ancestors([introrev], inclusive=True)
984 ac = cl.ancestors([introrev], inclusive=True)
990 base._ancestrycontext = ac
985 base._ancestrycontext = ac
991
986
992 # This algorithm would prefer to be recursive, but Python is a
987 # This algorithm would prefer to be recursive, but Python is a
993 # bit recursion-hostile. Instead we do an iterative
988 # bit recursion-hostile. Instead we do an iterative
994 # depth-first search.
989 # depth-first search.
995
990
996 # 1st DFS pre-calculates pcache and needed
991 # 1st DFS pre-calculates pcache and needed
997 visit = [base]
992 visit = [base]
998 pcache = {}
993 pcache = {}
999 needed = {base: 1}
994 needed = {base: 1}
1000 while visit:
995 while visit:
1001 f = visit.pop()
996 f = visit.pop()
1002 if f in pcache:
997 if f in pcache:
1003 continue
998 continue
1004 pl = parents(f)
999 pl = parents(f)
1005 pcache[f] = pl
1000 pcache[f] = pl
1006 for p in pl:
1001 for p in pl:
1007 needed[p] = needed.get(p, 0) + 1
1002 needed[p] = needed.get(p, 0) + 1
1008 if p not in pcache:
1003 if p not in pcache:
1009 visit.append(p)
1004 visit.append(p)
1010
1005
1011 # 2nd DFS does the actual annotate
1006 # 2nd DFS does the actual annotate
1012 visit[:] = [base]
1007 visit[:] = [base]
1013 hist = {}
1008 hist = {}
1014 while visit:
1009 while visit:
1015 f = visit[-1]
1010 f = visit[-1]
1016 if f in hist:
1011 if f in hist:
1017 visit.pop()
1012 visit.pop()
1018 continue
1013 continue
1019
1014
1020 ready = True
1015 ready = True
1021 pl = pcache[f]
1016 pl = pcache[f]
1022 for p in pl:
1017 for p in pl:
1023 if p not in hist:
1018 if p not in hist:
1024 ready = False
1019 ready = False
1025 visit.append(p)
1020 visit.append(p)
1026 if ready:
1021 if ready:
1027 visit.pop()
1022 visit.pop()
1028 curr = decorate(f.data(), f)
1023 curr = decorate(f.data(), f)
1029 for p in pl:
1024 for p in pl:
1030 curr = pair(hist[p], curr)
1025 curr = pair(hist[p], curr)
1031 if needed[p] == 1:
1026 if needed[p] == 1:
1032 del hist[p]
1027 del hist[p]
1033 del needed[p]
1028 del needed[p]
1034 else:
1029 else:
1035 needed[p] -= 1
1030 needed[p] -= 1
1036
1031
1037 hist[f] = curr
1032 hist[f] = curr
1038 del pcache[f]
1033 del pcache[f]
1039
1034
1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1035 return zip(hist[base][0], hist[base][1].splitlines(True))
1041
1036
1042 def ancestors(self, followfirst=False):
1037 def ancestors(self, followfirst=False):
1043 visit = {}
1038 visit = {}
1044 c = self
1039 c = self
1045 if followfirst:
1040 if followfirst:
1046 cut = 1
1041 cut = 1
1047 else:
1042 else:
1048 cut = None
1043 cut = None
1049
1044
1050 while True:
1045 while True:
1051 for parent in c.parents()[:cut]:
1046 for parent in c.parents()[:cut]:
1052 visit[(parent.linkrev(), parent.filenode())] = parent
1047 visit[(parent.linkrev(), parent.filenode())] = parent
1053 if not visit:
1048 if not visit:
1054 break
1049 break
1055 c = visit.pop(max(visit))
1050 c = visit.pop(max(visit))
1056 yield c
1051 yield c
1057
1052
1058 class filectx(basefilectx):
1053 class filectx(basefilectx):
1059 """A filecontext object makes access to data related to a particular
1054 """A filecontext object makes access to data related to a particular
1060 filerevision convenient."""
1055 filerevision convenient."""
1061 def __init__(self, repo, path, changeid=None, fileid=None,
1056 def __init__(self, repo, path, changeid=None, fileid=None,
1062 filelog=None, changectx=None):
1057 filelog=None, changectx=None):
1063 """changeid can be a changeset revision, node, or tag.
1058 """changeid can be a changeset revision, node, or tag.
1064 fileid can be a file revision or node."""
1059 fileid can be a file revision or node."""
1065 self._repo = repo
1060 self._repo = repo
1066 self._path = path
1061 self._path = path
1067
1062
1068 assert (changeid is not None
1063 assert (changeid is not None
1069 or fileid is not None
1064 or fileid is not None
1070 or changectx is not None), \
1065 or changectx is not None), \
1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1066 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1072 % (changeid, fileid, changectx))
1067 % (changeid, fileid, changectx))
1073
1068
1074 if filelog is not None:
1069 if filelog is not None:
1075 self._filelog = filelog
1070 self._filelog = filelog
1076
1071
1077 if changeid is not None:
1072 if changeid is not None:
1078 self._changeid = changeid
1073 self._changeid = changeid
1079 if changectx is not None:
1074 if changectx is not None:
1080 self._changectx = changectx
1075 self._changectx = changectx
1081 if fileid is not None:
1076 if fileid is not None:
1082 self._fileid = fileid
1077 self._fileid = fileid
1083
1078
1084 @propertycache
1079 @propertycache
1085 def _changectx(self):
1080 def _changectx(self):
1086 try:
1081 try:
1087 return changectx(self._repo, self._changeid)
1082 return changectx(self._repo, self._changeid)
1088 except error.FilteredRepoLookupError:
1083 except error.FilteredRepoLookupError:
1089 # Linkrev may point to any revision in the repository. When the
1084 # Linkrev may point to any revision in the repository. When the
1090 # repository is filtered this may lead to `filectx` trying to build
1085 # repository is filtered this may lead to `filectx` trying to build
1091 # `changectx` for a filtered revision. In such a case we fall back to
1086 # `changectx` for a filtered revision. In such a case we fall back to
1092 # creating `changectx` on the unfiltered version of the repository.
1087 # creating `changectx` on the unfiltered version of the repository.
1093 # This fallback should not be an issue because `changectx` from
1088 # This fallback should not be an issue because `changectx` from
1094 # `filectx` are not used in complex operations that care about
1089 # `filectx` are not used in complex operations that care about
1095 # filtering.
1090 # filtering.
1096 #
1091 #
1097 # This fallback is a cheap and dirty fix that prevents several
1092 # This fallback is a cheap and dirty fix that prevents several
1098 # crashes. It does not ensure the behavior is correct. However, the
1093 # crashes. It does not ensure the behavior is correct. However, the
1099 # behavior was not correct before filtering either, and "incorrect
1094 # behavior was not correct before filtering either, and "incorrect
1100 # behavior" is seen as better than "crash".
1095 # behavior" is seen as better than "crash".
1101 #
1096 #
1102 # Linkrevs have several serious problems with filtering that are
1097 # Linkrevs have several serious problems with filtering that are
1103 # complicated to solve. Proper handling of the issue here should be
1098 # complicated to solve. Proper handling of the issue here should be
1104 # considered once solving the linkrev issues is on the table.
1099 # considered once solving the linkrev issues is on the table.
1105 return changectx(self._repo.unfiltered(), self._changeid)
1100 return changectx(self._repo.unfiltered(), self._changeid)
1106
1101
1107 def filectx(self, fileid, changeid=None):
1102 def filectx(self, fileid, changeid=None):
1108 '''opens an arbitrary revision of the file without
1103 '''opens an arbitrary revision of the file without
1109 opening a new filelog'''
1104 opening a new filelog'''
1110 return filectx(self._repo, self._path, fileid=fileid,
1105 return filectx(self._repo, self._path, fileid=fileid,
1111 filelog=self._filelog, changeid=changeid)
1106 filelog=self._filelog, changeid=changeid)
1112
1107
1113 def data(self):
1108 def data(self):
1114 try:
1109 try:
1115 return self._filelog.read(self._filenode)
1110 return self._filelog.read(self._filenode)
1116 except error.CensoredNodeError:
1111 except error.CensoredNodeError:
1117 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1112 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1118 return ""
1113 return ""
1119 raise error.Abort(_("censored node: %s") % short(self._filenode),
1114 raise error.Abort(_("censored node: %s") % short(self._filenode),
1120 hint=_("set censor.policy to ignore errors"))
1115 hint=_("set censor.policy to ignore errors"))
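# As the hint above suggests, censored reads can be made non-fatal through
# configuration; a sketch of the relevant hgrc section:
#
#   [censor]
#   policy = ignore
#
# With that setting this method returns an empty string instead of aborting
# when it reads a censored node.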
1121
1116
1122 def size(self):
1117 def size(self):
1123 return self._filelog.size(self._filerev)
1118 return self._filelog.size(self._filerev)
1124
1119
1125 def renamed(self):
1120 def renamed(self):
1126 """check if file was actually renamed in this changeset revision
1121 """check if file was actually renamed in this changeset revision
1127
1122
1128 If a rename is logged in the file revision, we report the copy for the
1123 If a rename is logged in the file revision, we report the copy for the
1129 changeset only if the file revision's linkrev points back to the changeset
1124 changeset only if the file revision's linkrev points back to the changeset
1130 in question, or if both changeset parents contain different file revisions.
1125 in question, or if both changeset parents contain different file revisions.
1131 """
1126 """
1132
1127
1133 renamed = self._filelog.renamed(self._filenode)
1128 renamed = self._filelog.renamed(self._filenode)
1134 if not renamed:
1129 if not renamed:
1135 return renamed
1130 return renamed
1136
1131
1137 if self.rev() == self.linkrev():
1132 if self.rev() == self.linkrev():
1138 return renamed
1133 return renamed
1139
1134
1140 name = self.path()
1135 name = self.path()
1141 fnode = self._filenode
1136 fnode = self._filenode
1142 for p in self._changectx.parents():
1137 for p in self._changectx.parents():
1143 try:
1138 try:
1144 if fnode == p.filenode(name):
1139 if fnode == p.filenode(name):
1145 return None
1140 return None
1146 except error.LookupError:
1141 except error.LookupError:
1147 pass
1142 pass
1148 return renamed
1143 return renamed
1149
1144
1150 def children(self):
1145 def children(self):
1151 # hard for renames
1146 # hard for renames
1152 c = self._filelog.children(self._filenode)
1147 c = self._filelog.children(self._filenode)
1153 return [filectx(self._repo, self._path, fileid=x,
1148 return [filectx(self._repo, self._path, fileid=x,
1154 filelog=self._filelog) for x in c]
1149 filelog=self._filelog) for x in c]
1155
1150
1156 class committablectx(basectx):
1151 class committablectx(basectx):
1157 """A committablectx object provides common functionality for a context that
1152 """A committablectx object provides common functionality for a context that
1158 wants the ability to commit, e.g. workingctx or memctx."""
1153 wants the ability to commit, e.g. workingctx or memctx."""
1159 def __init__(self, repo, text="", user=None, date=None, extra=None,
1154 def __init__(self, repo, text="", user=None, date=None, extra=None,
1160 changes=None):
1155 changes=None):
1161 self._repo = repo
1156 self._repo = repo
1162 self._rev = None
1157 self._rev = None
1163 self._node = None
1158 self._node = None
1164 self._text = text
1159 self._text = text
1165 if date:
1160 if date:
1166 self._date = util.parsedate(date)
1161 self._date = util.parsedate(date)
1167 if user:
1162 if user:
1168 self._user = user
1163 self._user = user
1169 if changes:
1164 if changes:
1170 self._status = changes
1165 self._status = changes
1171
1166
1172 self._extra = {}
1167 self._extra = {}
1173 if extra:
1168 if extra:
1174 self._extra = extra.copy()
1169 self._extra = extra.copy()
1175 if 'branch' not in self._extra:
1170 if 'branch' not in self._extra:
1176 try:
1171 try:
1177 branch = encoding.fromlocal(self._repo.dirstate.branch())
1172 branch = encoding.fromlocal(self._repo.dirstate.branch())
1178 except UnicodeDecodeError:
1173 except UnicodeDecodeError:
1179 raise error.Abort(_('branch name not in UTF-8!'))
1174 raise error.Abort(_('branch name not in UTF-8!'))
1180 self._extra['branch'] = branch
1175 self._extra['branch'] = branch
1181 if self._extra['branch'] == '':
1176 if self._extra['branch'] == '':
1182 self._extra['branch'] = 'default'
1177 self._extra['branch'] = 'default'
1183
1178
1184 def __str__(self):
1179 def __str__(self):
1185 return str(self._parents[0]) + "+"
1180 return str(self._parents[0]) + "+"
1186
1181
1187 def __nonzero__(self):
1182 def __nonzero__(self):
1188 return True
1183 return True
1189
1184
1190 def _buildflagfunc(self):
1185 def _buildflagfunc(self):
1191 # Create a fallback function for getting file flags when the
1186 # Create a fallback function for getting file flags when the
1192 # filesystem doesn't support them
1187 # filesystem doesn't support them
1193
1188
1194 copiesget = self._repo.dirstate.copies().get
1189 copiesget = self._repo.dirstate.copies().get
1195 parents = self.parents()
1190 parents = self.parents()
1196 if len(parents) < 2:
1191 if len(parents) < 2:
1197 # when we have one parent, it's easy: copy from parent
1192 # when we have one parent, it's easy: copy from parent
1198 man = parents[0].manifest()
1193 man = parents[0].manifest()
1199 def func(f):
1194 def func(f):
1200 f = copiesget(f, f)
1195 f = copiesget(f, f)
1201 return man.flags(f)
1196 return man.flags(f)
1202 else:
1197 else:
1203 # merges are tricky: we try to reconstruct the unstored
1198 # merges are tricky: we try to reconstruct the unstored
1204 # result from the merge (issue1802)
1199 # result from the merge (issue1802)
1205 p1, p2 = parents
1200 p1, p2 = parents
1206 pa = p1.ancestor(p2)
1201 pa = p1.ancestor(p2)
1207 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1202 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1208
1203
1209 def func(f):
1204 def func(f):
1210 f = copiesget(f, f) # may be wrong for merges with copies
1205 f = copiesget(f, f) # may be wrong for merges with copies
1211 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1206 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1212 if fl1 == fl2:
1207 if fl1 == fl2:
1213 return fl1
1208 return fl1
1214 if fl1 == fla:
1209 if fl1 == fla:
1215 return fl2
1210 return fl2
1216 if fl2 == fla:
1211 if fl2 == fla:
1217 return fl1
1212 return fl1
1218 return '' # punt for conflicts
1213 return '' # punt for conflicts
1219
1214
1220 return func
1215 return func
1221
1216
1222 @propertycache
1217 @propertycache
1223 def _flagfunc(self):
1218 def _flagfunc(self):
1224 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1219 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1225
1220
1226 @propertycache
1221 @propertycache
1227 def _manifest(self):
1222 def _manifest(self):
1228 """generate a manifest corresponding to the values in self._status
1223 """generate a manifest corresponding to the values in self._status
1229
1224
1230 This reuses the file nodeid from the parent but appends an extra letter
1225 This reuses the file nodeid from the parent but appends an extra letter
1231 when modified: modified files get an extra 'm', added files an extra 'a'.
1226 when modified: modified files get an extra 'm', added files an extra 'a'.
1232 This is used by the manifest merge to see that the files are different
1227 This is used by the manifest merge to see that the files are different
1233 and by the update logic to avoid deleting newly added files.
1228 and by the update logic to avoid deleting newly added files.
1234 """
1229 """
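# Illustrative sketch (the file names and p1node are hypothetical): for a
# file 'foo' modified in the working copy whose nodeid in the parent
# manifest is p1node, and a newly added file 'bar':
#
#   man['foo'] == p1node + 'm'
#   man['bar'] == nullid + 'a'
#
# The resulting 21-byte values never collide with real 20-byte nodeids.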
1235 parents = self.parents()
1230 parents = self.parents()
1236
1231
1237 man1 = parents[0].manifest()
1232 man1 = parents[0].manifest()
1238 man = man1.copy()
1233 man = man1.copy()
1239 if len(parents) > 1:
1234 if len(parents) > 1:
1240 man2 = self.p2().manifest()
1235 man2 = self.p2().manifest()
1241 def getman(f):
1236 def getman(f):
1242 if f in man1:
1237 if f in man1:
1243 return man1
1238 return man1
1244 return man2
1239 return man2
1245 else:
1240 else:
1246 getman = lambda f: man1
1241 getman = lambda f: man1
1247
1242
1248 copied = self._repo.dirstate.copies()
1243 copied = self._repo.dirstate.copies()
1249 ff = self._flagfunc
1244 ff = self._flagfunc
1250 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1245 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1251 for f in l:
1246 for f in l:
1252 orig = copied.get(f, f)
1247 orig = copied.get(f, f)
1253 man[f] = getman(orig).get(orig, nullid) + i
1248 man[f] = getman(orig).get(orig, nullid) + i
1254 try:
1249 try:
1255 man.setflag(f, ff(f))
1250 man.setflag(f, ff(f))
1256 except OSError:
1251 except OSError:
1257 pass
1252 pass
1258
1253
1259 for f in self._status.deleted + self._status.removed:
1254 for f in self._status.deleted + self._status.removed:
1260 if f in man:
1255 if f in man:
1261 del man[f]
1256 del man[f]
1262
1257
1263 return man
1258 return man
1264
1259
1265 @propertycache
1260 @propertycache
1266 def _status(self):
1261 def _status(self):
1267 return self._repo.status()
1262 return self._repo.status()
1268
1263
1269 @propertycache
1264 @propertycache
1270 def _user(self):
1265 def _user(self):
1271 return self._repo.ui.username()
1266 return self._repo.ui.username()
1272
1267
1273 @propertycache
1268 @propertycache
1274 def _date(self):
1269 def _date(self):
1275 return util.makedate()
1270 return util.makedate()
1276
1271
1277 def subrev(self, subpath):
1272 def subrev(self, subpath):
1278 return None
1273 return None
1279
1274
1280 def manifestnode(self):
1275 def manifestnode(self):
1281 return None
1276 return None
1282 def user(self):
1277 def user(self):
1283 return self._user or self._repo.ui.username()
1278 return self._user or self._repo.ui.username()
1284 def date(self):
1279 def date(self):
1285 return self._date
1280 return self._date
1286 def description(self):
1281 def description(self):
1287 return self._text
1282 return self._text
1288 def files(self):
1283 def files(self):
1289 return sorted(self._status.modified + self._status.added +
1284 return sorted(self._status.modified + self._status.added +
1290 self._status.removed)
1285 self._status.removed)
1291
1286
1292 def modified(self):
1287 def modified(self):
1293 return self._status.modified
1288 return self._status.modified
1294 def added(self):
1289 def added(self):
1295 return self._status.added
1290 return self._status.added
1296 def removed(self):
1291 def removed(self):
1297 return self._status.removed
1292 return self._status.removed
1298 def deleted(self):
1293 def deleted(self):
1299 return self._status.deleted
1294 return self._status.deleted
1300 def branch(self):
1295 def branch(self):
1301 return encoding.tolocal(self._extra['branch'])
1296 return encoding.tolocal(self._extra['branch'])
1302 def closesbranch(self):
1297 def closesbranch(self):
1303 return 'close' in self._extra
1298 return 'close' in self._extra
1304 def extra(self):
1299 def extra(self):
1305 return self._extra
1300 return self._extra
1306
1301
1307 def tags(self):
1302 def tags(self):
1308 return []
1303 return []
1309
1304
1310 def bookmarks(self):
1305 def bookmarks(self):
1311 b = []
1306 b = []
1312 for p in self.parents():
1307 for p in self.parents():
1313 b.extend(p.bookmarks())
1308 b.extend(p.bookmarks())
1314 return b
1309 return b
1315
1310
1316 def phase(self):
1311 def phase(self):
1317 phase = phases.draft # default phase to draft
1312 phase = phases.draft # default phase to draft
1318 for p in self.parents():
1313 for p in self.parents():
1319 phase = max(phase, p.phase())
1314 phase = max(phase, p.phase())
1320 return phase
1315 return phase
1321
1316
1322 def hidden(self):
1317 def hidden(self):
1323 return False
1318 return False
1324
1319
1325 def children(self):
1320 def children(self):
1326 return []
1321 return []
1327
1322
1328 def flags(self, path):
1323 def flags(self, path):
1329 if '_manifest' in self.__dict__:
1324 if '_manifest' in self.__dict__:
1330 try:
1325 try:
1331 return self._manifest.flags(path)
1326 return self._manifest.flags(path)
1332 except KeyError:
1327 except KeyError:
1333 return ''
1328 return ''
1334
1329
1335 try:
1330 try:
1336 return self._flagfunc(path)
1331 return self._flagfunc(path)
1337 except OSError:
1332 except OSError:
1338 return ''
1333 return ''
1339
1334
1340 def ancestor(self, c2):
1335 def ancestor(self, c2):
1341 """return the "best" ancestor context of self and c2"""
1336 """return the "best" ancestor context of self and c2"""
1342 return self._parents[0].ancestor(c2) # punt on two parents for now
1337 return self._parents[0].ancestor(c2) # punt on two parents for now
1343
1338
1344 def walk(self, match):
1339 def walk(self, match):
1345 '''Generates matching file names.'''
1340 '''Generates matching file names.'''
1346 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1341 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1347 True, False))
1342 True, False))
1348
1343
1349 def matches(self, match):
1344 def matches(self, match):
1350 return sorted(self._repo.dirstate.matches(match))
1345 return sorted(self._repo.dirstate.matches(match))
1351
1346
1352 def ancestors(self):
1347 def ancestors(self):
1353 for p in self._parents:
1348 for p in self._parents:
1354 yield p
1349 yield p
1355 for a in self._repo.changelog.ancestors(
1350 for a in self._repo.changelog.ancestors(
1356 [p.rev() for p in self._parents]):
1351 [p.rev() for p in self._parents]):
1357 yield changectx(self._repo, a)
1352 yield changectx(self._repo, a)
1358
1353
1359 def markcommitted(self, node):
1354 def markcommitted(self, node):
1360 """Perform post-commit cleanup necessary after committing this ctx
1355 """Perform post-commit cleanup necessary after committing this ctx
1361
1356
1362 Specifically, this updates the backing stores that this working context
1357 Specifically, this updates the backing stores that this working context
1363 wraps to reflect the fact that the changes represented by this
1358 wraps to reflect the fact that the changes represented by this
1364 workingctx have been committed. For example, it marks
1359 workingctx have been committed. For example, it marks
1365 modified and added files as normal in the dirstate.
1360 modified and added files as normal in the dirstate.
1366
1361
1367 """
1362 """
1368
1363
1369 self._repo.dirstate.beginparentchange()
1364 self._repo.dirstate.beginparentchange()
1370 for f in self.modified() + self.added():
1365 for f in self.modified() + self.added():
1371 self._repo.dirstate.normal(f)
1366 self._repo.dirstate.normal(f)
1372 for f in self.removed():
1367 for f in self.removed():
1373 self._repo.dirstate.drop(f)
1368 self._repo.dirstate.drop(f)
1374 self._repo.dirstate.setparents(node)
1369 self._repo.dirstate.setparents(node)
1375 self._repo.dirstate.endparentchange()
1370 self._repo.dirstate.endparentchange()
1376
1371
1377 # write changes out explicitly, because nesting wlock at
1372 # write changes out explicitly, because nesting wlock at
1378 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1373 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1379 # from immediately doing so for subsequent changing files
1374 # from immediately doing so for subsequent changing files
1380 self._repo.dirstate.write(self._repo.currenttransaction())
1375 self._repo.dirstate.write(self._repo.currenttransaction())
1381
1376
1382 class workingctx(committablectx):
1377 class workingctx(committablectx):
1383 """A workingctx object makes access to data related to
1378 """A workingctx object makes access to data related to
1384 the current working directory convenient.
1379 the current working directory convenient.
1385 date - any valid date string or (unixtime, offset), or None.
1380 date - any valid date string or (unixtime, offset), or None.
1386 user - username string, or None.
1381 user - username string, or None.
1387 extra - a dictionary of extra values, or None.
1382 extra - a dictionary of extra values, or None.
1388 changes - a list of file lists as returned by localrepo.status()
1383 changes - a list of file lists as returned by localrepo.status()
1389 or None to use the repository status.
1384 or None to use the repository status.
1390 """
1385 """
1391 def __init__(self, repo, text="", user=None, date=None, extra=None,
1386 def __init__(self, repo, text="", user=None, date=None, extra=None,
1392 changes=None):
1387 changes=None):
1393 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1388 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1394
1389
1395 def __iter__(self):
1390 def __iter__(self):
1396 d = self._repo.dirstate
1391 d = self._repo.dirstate
1397 for f in d:
1392 for f in d:
1398 if d[f] != 'r':
1393 if d[f] != 'r':
1399 yield f
1394 yield f
1400
1395
1401 def __contains__(self, key):
1396 def __contains__(self, key):
1402 return self._repo.dirstate[key] not in "?r"
1397 return self._repo.dirstate[key] not in "?r"
1403
1398
1404 def hex(self):
1399 def hex(self):
1405 return hex(wdirid)
1400 return hex(wdirid)
1406
1401
1407 @propertycache
1402 @propertycache
1408 def _parents(self):
1403 def _parents(self):
1409 p = self._repo.dirstate.parents()
1404 p = self._repo.dirstate.parents()
1410 if p[1] == nullid:
1405 if p[1] == nullid:
1411 p = p[:-1]
1406 p = p[:-1]
1412 return [changectx(self._repo, x) for x in p]
1407 return [changectx(self._repo, x) for x in p]
1413
1408
1414 def filectx(self, path, filelog=None):
1409 def filectx(self, path, filelog=None):
1415 """get a file context from the working directory"""
1410 """get a file context from the working directory"""
1416 return workingfilectx(self._repo, path, workingctx=self,
1411 return workingfilectx(self._repo, path, workingctx=self,
1417 filelog=filelog)
1412 filelog=filelog)
1418
1413
1419 def dirty(self, missing=False, merge=True, branch=True):
1414 def dirty(self, missing=False, merge=True, branch=True):
1420 "check whether a working directory is modified"
1415 "check whether a working directory is modified"
1421 # check subrepos first
1416 # check subrepos first
1422 for s in sorted(self.substate):
1417 for s in sorted(self.substate):
1423 if self.sub(s).dirty():
1418 if self.sub(s).dirty():
1424 return True
1419 return True
1425 # check current working dir
1420 # check current working dir
1426 return ((merge and self.p2()) or
1421 return ((merge and self.p2()) or
1427 (branch and self.branch() != self.p1().branch()) or
1422 (branch and self.branch() != self.p1().branch()) or
1428 self.modified() or self.added() or self.removed() or
1423 self.modified() or self.added() or self.removed() or
1429 (missing and self.deleted()))
1424 (missing and self.deleted()))
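# For example (hypothetical call sites): repo[None].dirty() is True after a
# tracked file is edited, and repo[None].dirty(missing=True) also reports
# True when a tracked file has merely been deleted from disk without
# 'hg remove'.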
1430
1425
1431 def add(self, list, prefix=""):
1426 def add(self, list, prefix=""):
1432 join = lambda f: os.path.join(prefix, f)
1427 join = lambda f: os.path.join(prefix, f)
1433 with self._repo.wlock():
1428 with self._repo.wlock():
1434 ui, ds = self._repo.ui, self._repo.dirstate
1429 ui, ds = self._repo.ui, self._repo.dirstate
1435 rejected = []
1430 rejected = []
1436 lstat = self._repo.wvfs.lstat
1431 lstat = self._repo.wvfs.lstat
1437 for f in list:
1432 for f in list:
1438 scmutil.checkportable(ui, join(f))
1433 scmutil.checkportable(ui, join(f))
1439 try:
1434 try:
1440 st = lstat(f)
1435 st = lstat(f)
1441 except OSError:
1436 except OSError:
1442 ui.warn(_("%s does not exist!\n") % join(f))
1437 ui.warn(_("%s does not exist!\n") % join(f))
1443 rejected.append(f)
1438 rejected.append(f)
1444 continue
1439 continue
1445 if st.st_size > 10000000:
1440 if st.st_size > 10000000:
1446 ui.warn(_("%s: up to %d MB of RAM may be required "
1441 ui.warn(_("%s: up to %d MB of RAM may be required "
1447 "to manage this file\n"
1442 "to manage this file\n"
1448 "(use 'hg revert %s' to cancel the "
1443 "(use 'hg revert %s' to cancel the "
1449 "pending addition)\n")
1444 "pending addition)\n")
1450 % (f, 3 * st.st_size // 1000000, join(f)))
1445 % (f, 3 * st.st_size // 1000000, join(f)))
1451 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1446 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1452 ui.warn(_("%s not added: only files and symlinks "
1447 ui.warn(_("%s not added: only files and symlinks "
1453 "supported currently\n") % join(f))
1448 "supported currently\n") % join(f))
1454 rejected.append(f)
1449 rejected.append(f)
1455 elif ds[f] in 'amn':
1450 elif ds[f] in 'amn':
1456 ui.warn(_("%s already tracked!\n") % join(f))
1451 ui.warn(_("%s already tracked!\n") % join(f))
1457 elif ds[f] == 'r':
1452 elif ds[f] == 'r':
1458 ds.normallookup(f)
1453 ds.normallookup(f)
1459 else:
1454 else:
1460 ds.add(f)
1455 ds.add(f)
1461 return rejected
1456 return rejected
1462
1457
1463 def forget(self, files, prefix=""):
1458 def forget(self, files, prefix=""):
1464 join = lambda f: os.path.join(prefix, f)
1459 join = lambda f: os.path.join(prefix, f)
1465 with self._repo.wlock():
1460 with self._repo.wlock():
1466 rejected = []
1461 rejected = []
1467 for f in files:
1462 for f in files:
1468 if f not in self._repo.dirstate:
1463 if f not in self._repo.dirstate:
1469 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1464 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1470 rejected.append(f)
1465 rejected.append(f)
1471 elif self._repo.dirstate[f] != 'a':
1466 elif self._repo.dirstate[f] != 'a':
1472 self._repo.dirstate.remove(f)
1467 self._repo.dirstate.remove(f)
1473 else:
1468 else:
1474 self._repo.dirstate.drop(f)
1469 self._repo.dirstate.drop(f)
1475 return rejected
1470 return rejected
1476
1471
1477 def undelete(self, list):
1472 def undelete(self, list):
1478 pctxs = self.parents()
1473 pctxs = self.parents()
1479 with self._repo.wlock():
1474 with self._repo.wlock():
1480 for f in list:
1475 for f in list:
1481 if self._repo.dirstate[f] != 'r':
1476 if self._repo.dirstate[f] != 'r':
1482 self._repo.ui.warn(_("%s not removed!\n") % f)
1477 self._repo.ui.warn(_("%s not removed!\n") % f)
1483 else:
1478 else:
1484 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1479 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1485 t = fctx.data()
1480 t = fctx.data()
1486 self._repo.wwrite(f, t, fctx.flags())
1481 self._repo.wwrite(f, t, fctx.flags())
1487 self._repo.dirstate.normal(f)
1482 self._repo.dirstate.normal(f)
1488
1483
1489 def copy(self, source, dest):
1484 def copy(self, source, dest):
1490 try:
1485 try:
1491 st = self._repo.wvfs.lstat(dest)
1486 st = self._repo.wvfs.lstat(dest)
1492 except OSError as err:
1487 except OSError as err:
1493 if err.errno != errno.ENOENT:
1488 if err.errno != errno.ENOENT:
1494 raise
1489 raise
1495 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1490 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1496 return
1491 return
1497 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1492 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1498 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1493 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1499 "symbolic link\n") % dest)
1494 "symbolic link\n") % dest)
1500 else:
1495 else:
1501 with self._repo.wlock():
1496 with self._repo.wlock():
1502 if self._repo.dirstate[dest] in '?':
1497 if self._repo.dirstate[dest] in '?':
1503 self._repo.dirstate.add(dest)
1498 self._repo.dirstate.add(dest)
1504 elif self._repo.dirstate[dest] in 'r':
1499 elif self._repo.dirstate[dest] in 'r':
1505 self._repo.dirstate.normallookup(dest)
1500 self._repo.dirstate.normallookup(dest)
1506 self._repo.dirstate.copy(source, dest)
1501 self._repo.dirstate.copy(source, dest)
1507
1502
1508 def match(self, pats=[], include=None, exclude=None, default='glob',
1503 def match(self, pats=[], include=None, exclude=None, default='glob',
1509 listsubrepos=False, badfn=None):
1504 listsubrepos=False, badfn=None):
1510 r = self._repo
1505 r = self._repo
1511
1506
1512 # Only a case insensitive filesystem needs magic to translate user input
1507 # Only a case insensitive filesystem needs magic to translate user input
1513 # to actual case in the filesystem.
1508 # to actual case in the filesystem.
1514 if not util.fscasesensitive(r.root):
1509 if not util.fscasesensitive(r.root):
1515 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1510 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1516 exclude, default, r.auditor, self,
1511 exclude, default, r.auditor, self,
1517 listsubrepos=listsubrepos,
1512 listsubrepos=listsubrepos,
1518 badfn=badfn)
1513 badfn=badfn)
1519 return matchmod.match(r.root, r.getcwd(), pats,
1514 return matchmod.match(r.root, r.getcwd(), pats,
1520 include, exclude, default,
1515 include, exclude, default,
1521 auditor=r.auditor, ctx=self,
1516 auditor=r.auditor, ctx=self,
1522 listsubrepos=listsubrepos, badfn=badfn)
1517 listsubrepos=listsubrepos, badfn=badfn)
1523
1518
1524 def _filtersuspectsymlink(self, files):
1519 def _filtersuspectsymlink(self, files):
1525 if not files or self._repo.dirstate._checklink:
1520 if not files or self._repo.dirstate._checklink:
1526 return files
1521 return files
1527
1522
1528 # Symlink placeholders may get non-symlink-like contents
1523 # Symlink placeholders may get non-symlink-like contents
1529 # via user error or dereferencing by NFS or Samba servers,
1524 # via user error or dereferencing by NFS or Samba servers,
1530 # so we filter out any placeholders that don't look like a
1525 # so we filter out any placeholders that don't look like a
1531 # symlink
1526 # symlink
1532 sane = []
1527 sane = []
1533 for f in files:
1528 for f in files:
1534 if self.flags(f) == 'l':
1529 if self.flags(f) == 'l':
1535 d = self[f].data()
1530 d = self[f].data()
1536 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1531 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1537 self._repo.ui.debug('ignoring suspect symlink placeholder'
1532 self._repo.ui.debug('ignoring suspect symlink placeholder'
1538 ' "%s"\n' % f)
1533 ' "%s"\n' % f)
1539 continue
1534 continue
1540 sane.append(f)
1535 sane.append(f)
1541 return sane
1536 return sane
1542
1537
1543 def _checklookup(self, files):
1538 def _checklookup(self, files):
1544 # check for any possibly clean files
1539 # check for any possibly clean files
1545 if not files:
1540 if not files:
1546 return [], []
1541 return [], []
1547
1542
1548 modified = []
1543 modified = []
1549 fixup = []
1544 fixup = []
1550 pctx = self._parents[0]
1545 pctx = self._parents[0]
1551 # do a full compare of any files that might have changed
1546 # do a full compare of any files that might have changed
1552 for f in sorted(files):
1547 for f in sorted(files):
1553 if (f not in pctx or self.flags(f) != pctx.flags(f)
1548 if (f not in pctx or self.flags(f) != pctx.flags(f)
1554 or pctx[f].cmp(self[f])):
1549 or pctx[f].cmp(self[f])):
1555 modified.append(f)
1550 modified.append(f)
1556 else:
1551 else:
1557 fixup.append(f)
1552 fixup.append(f)
1558
1553
1559 # update dirstate for files that are actually clean
1554 # update dirstate for files that are actually clean
1560 if fixup:
1555 if fixup:
1561 try:
1556 try:
1562 # updating the dirstate is optional
1557 # updating the dirstate is optional
1563 # so we don't wait on the lock
1558 # so we don't wait on the lock
1564 # wlock can invalidate the dirstate, so cache normal _after_
1559 # wlock can invalidate the dirstate, so cache normal _after_
1565 # taking the lock
1560 # taking the lock
1566 with self._repo.wlock(False):
1561 with self._repo.wlock(False):
1567 normal = self._repo.dirstate.normal
1562 normal = self._repo.dirstate.normal
1568 for f in fixup:
1563 for f in fixup:
1569 normal(f)
1564 normal(f)
1570 # write changes out explicitly, because nesting
1565 # write changes out explicitly, because nesting
1571 # wlock at runtime may prevent 'wlock.release()'
1566 # wlock at runtime may prevent 'wlock.release()'
1572 # after this block from doing so for subsequent
1567 # after this block from doing so for subsequent
1573 # changing files
1568 # changing files
1574 self._repo.dirstate.write(self._repo.currenttransaction())
1569 self._repo.dirstate.write(self._repo.currenttransaction())
1575 except error.LockError:
1570 except error.LockError:
1576 pass
1571 pass
1577 return modified, fixup
1572 return modified, fixup
1578
1573
1579 def _manifestmatches(self, match, s):
1574 def _manifestmatches(self, match, s):
1580 """Slow path for workingctx
1575 """Slow path for workingctx
1581
1576
1582 The fast path is used when we compare the working directory to its
1577 The fast path is used when we compare the working directory to its
1583 parent; reaching this function means we are comparing against a
1578 parent; reaching this function means we are comparing against a
1584 non-parent, so we need to build a manifest and return what matches.
1579 non-parent, so we need to build a manifest and return what matches.
1585 """
1580 """
1586 mf = self._repo['.']._manifestmatches(match, s)
1581 mf = self._repo['.']._manifestmatches(match, s)
1587 for f in s.modified + s.added:
1582 for f in s.modified + s.added:
1588 mf[f] = _newnode
1583 mf[f] = _newnode
1589 mf.setflag(f, self.flags(f))
1584 mf.setflag(f, self.flags(f))
1590 for f in s.removed:
1585 for f in s.removed:
1591 if f in mf:
1586 if f in mf:
1592 del mf[f]
1587 del mf[f]
1593 return mf
1588 return mf
1594
1589
1595 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1590 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1596 unknown=False):
1591 unknown=False):
1597 '''Gets the status from the dirstate -- internal use only.'''
1592 '''Gets the status from the dirstate -- internal use only.'''
1598 listignored, listclean, listunknown = ignored, clean, unknown
1593 listignored, listclean, listunknown = ignored, clean, unknown
1599 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1594 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1600 subrepos = []
1595 subrepos = []
1601 if '.hgsub' in self:
1596 if '.hgsub' in self:
1602 subrepos = sorted(self.substate)
1597 subrepos = sorted(self.substate)
1603 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1598 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1604 listclean, listunknown)
1599 listclean, listunknown)
1605
1600
1606 # check for any possibly clean files
1601 # check for any possibly clean files
1607 if cmp:
1602 if cmp:
1608 modified2, fixup = self._checklookup(cmp)
1603 modified2, fixup = self._checklookup(cmp)
1609 s.modified.extend(modified2)
1604 s.modified.extend(modified2)
1610
1605
1611 # update dirstate for files that are actually clean
1606 # update dirstate for files that are actually clean
1612 if fixup and listclean:
1607 if fixup and listclean:
1613 s.clean.extend(fixup)
1608 s.clean.extend(fixup)
1614
1609
1615 if match.always():
1610 if match.always():
1616 # cache for performance
1611 # cache for performance
1617 if s.unknown or s.ignored or s.clean:
1612 if s.unknown or s.ignored or s.clean:
1618 # "_status" is cached with list*=False in the normal route
1613 # "_status" is cached with list*=False in the normal route
1619 self._status = scmutil.status(s.modified, s.added, s.removed,
1614 self._status = scmutil.status(s.modified, s.added, s.removed,
1620 s.deleted, [], [], [])
1615 s.deleted, [], [], [])
1621 else:
1616 else:
1622 self._status = s
1617 self._status = s
1623
1618
1624 return s
1619 return s
1625
1620
1626 def _buildstatus(self, other, s, match, listignored, listclean,
1621 def _buildstatus(self, other, s, match, listignored, listclean,
1627 listunknown):
1622 listunknown):
1628 """build a status with respect to another context
1623 """build a status with respect to another context
1629
1624
1630 This includes logic for maintaining the fast path of status when
1625 This includes logic for maintaining the fast path of status when
1631 comparing the working directory against its parent: a new manifest is
1626 comparing the working directory against its parent: a new manifest is
1632 only built when self (the working directory) is being compared against
1627 only built when self (the working directory) is being compared against
1633 something other than its parent (repo['.']).
1628 something other than its parent (repo['.']).
1634 """
1629 """
1635 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1630 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1636 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1631 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1637 # might have accidentally ended up with the entire contents of the file
1632 # might have accidentally ended up with the entire contents of the file
1638 # they are supposed to be linking to.
1633 # they are supposed to be linking to.
1639 s.modified[:] = self._filtersuspectsymlink(s.modified)
1634 s.modified[:] = self._filtersuspectsymlink(s.modified)
1640 if other != self._repo['.']:
1635 if other != self._repo['.']:
1641 s = super(workingctx, self)._buildstatus(other, s, match,
1636 s = super(workingctx, self)._buildstatus(other, s, match,
1642 listignored, listclean,
1637 listignored, listclean,
1643 listunknown)
1638 listunknown)
1644 return s
1639 return s
1645
1640
1646 def _matchstatus(self, other, match):
1641 def _matchstatus(self, other, match):
1647 """override the match method with a filter for directory patterns
1642 """override the match method with a filter for directory patterns
1648
1643
1649 We use inheritance to customize the match.bad method only in cases of
1644 We use inheritance to customize the match.bad method only in cases of
1650 workingctx since it belongs only to the working directory when
1645 workingctx since it belongs only to the working directory when
1651 comparing against the parent changeset.
1646 comparing against the parent changeset.
1652
1647
1653 If we aren't comparing against the working directory's parent, then we
1648 If we aren't comparing against the working directory's parent, then we
1654 just use the default match object sent to us.
1649 just use the default match object sent to us.
1655 """
1650 """
1656 superself = super(workingctx, self)
1651 superself = super(workingctx, self)
1657 match = superself._matchstatus(other, match)
1652 match = superself._matchstatus(other, match)
1658 if other != self._repo['.']:
1653 if other != self._repo['.']:
1659 def bad(f, msg):
1654 def bad(f, msg):
1660 # 'f' may be a directory pattern from 'match.files()',
1655 # 'f' may be a directory pattern from 'match.files()',
1661 # so 'f not in ctx1' is not enough
1656 # so 'f not in ctx1' is not enough
1662 if f not in other and not other.hasdir(f):
1657 if f not in other and not other.hasdir(f):
1663 self._repo.ui.warn('%s: %s\n' %
1658 self._repo.ui.warn('%s: %s\n' %
1664 (self._repo.dirstate.pathto(f), msg))
1659 (self._repo.dirstate.pathto(f), msg))
1665 match.bad = bad
1660 match.bad = bad
1666 return match
1661 return match
1667
1662
1668 class committablefilectx(basefilectx):
1663 class committablefilectx(basefilectx):
1669 """A committablefilectx provides common functionality for a file context
1664 """A committablefilectx provides common functionality for a file context
1670 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1665 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1671 def __init__(self, repo, path, filelog=None, ctx=None):
1666 def __init__(self, repo, path, filelog=None, ctx=None):
1672 self._repo = repo
1667 self._repo = repo
1673 self._path = path
1668 self._path = path
1674 self._changeid = None
1669 self._changeid = None
1675 self._filerev = self._filenode = None
1670 self._filerev = self._filenode = None
1676
1671
1677 if filelog is not None:
1672 if filelog is not None:
1678 self._filelog = filelog
1673 self._filelog = filelog
1679 if ctx:
1674 if ctx:
1680 self._changectx = ctx
1675 self._changectx = ctx
1681
1676
1682 def __nonzero__(self):
1677 def __nonzero__(self):
1683 return True
1678 return True
1684
1679
1685 def linkrev(self):
1680 def linkrev(self):
1686 # linked to self._changectx no matter if file is modified or not
1681 # linked to self._changectx no matter if file is modified or not
1687 return self.rev()
1682 return self.rev()
1688
1683
1689 def parents(self):
1684 def parents(self):
1690 '''return parent filectxs, following copies if necessary'''
1685 '''return parent filectxs, following copies if necessary'''
1691 def filenode(ctx, path):
1686 def filenode(ctx, path):
1692 return ctx._manifest.get(path, nullid)
1687 return ctx._manifest.get(path, nullid)
1693
1688
1694 path = self._path
1689 path = self._path
1695 fl = self._filelog
1690 fl = self._filelog
1696 pcl = self._changectx._parents
1691 pcl = self._changectx._parents
1697 renamed = self.renamed()
1692 renamed = self.renamed()
1698
1693
1699 if renamed:
1694 if renamed:
1700 pl = [renamed + (None,)]
1695 pl = [renamed + (None,)]
1701 else:
1696 else:
1702 pl = [(path, filenode(pcl[0], path), fl)]
1697 pl = [(path, filenode(pcl[0], path), fl)]
1703
1698
1704 for pc in pcl[1:]:
1699 for pc in pcl[1:]:
1705 pl.append((path, filenode(pc, path), fl))
1700 pl.append((path, filenode(pc, path), fl))
1706
1701
1707 return [self._parentfilectx(p, fileid=n, filelog=l)
1702 return [self._parentfilectx(p, fileid=n, filelog=l)
1708 for p, n, l in pl if n != nullid]
1703 for p, n, l in pl if n != nullid]
1709
1704
1710 def children(self):
1705 def children(self):
1711 return []
1706 return []
1712
1707
1713 class workingfilectx(committablefilectx):
1708 class workingfilectx(committablefilectx):
1714 """A workingfilectx object makes access to data related to a particular
1709 """A workingfilectx object makes access to data related to a particular
1715 file in the working directory convenient."""
1710 file in the working directory convenient."""
1716 def __init__(self, repo, path, filelog=None, workingctx=None):
1711 def __init__(self, repo, path, filelog=None, workingctx=None):
1717 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1712 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1718
1713
1719 @propertycache
1714 @propertycache
1720 def _changectx(self):
1715 def _changectx(self):
1721 return workingctx(self._repo)
1716 return workingctx(self._repo)
1722
1717
1723 def data(self):
1718 def data(self):
1724 return self._repo.wread(self._path)
1719 return self._repo.wread(self._path)
1725 def renamed(self):
1720 def renamed(self):
1726 rp = self._repo.dirstate.copied(self._path)
1721 rp = self._repo.dirstate.copied(self._path)
1727 if not rp:
1722 if not rp:
1728 return None
1723 return None
1729 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1724 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1730
1725
1731 def size(self):
1726 def size(self):
1732 return self._repo.wvfs.lstat(self._path).st_size
1727 return self._repo.wvfs.lstat(self._path).st_size
1733 def date(self):
1728 def date(self):
1734 t, tz = self._changectx.date()
1729 t, tz = self._changectx.date()
1735 try:
1730 try:
1736 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1731 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1737 except OSError as err:
1732 except OSError as err:
1738 if err.errno != errno.ENOENT:
1733 if err.errno != errno.ENOENT:
1739 raise
1734 raise
1740 return (t, tz)
1735 return (t, tz)
1741
1736
1742 def cmp(self, fctx):
1737 def cmp(self, fctx):
1743 """compare with other file context
1738 """compare with other file context
1744
1739
1745 returns True if different than fctx.
1740 returns True if different than fctx.
1746 """
1741 """
1747 # fctx should be a filectx (not a workingfilectx)
1742 # fctx should be a filectx (not a workingfilectx)
1748 # invert comparison to reuse the same code path
1743 # invert comparison to reuse the same code path
1749 return fctx.cmp(self)
1744 return fctx.cmp(self)
1750
1745
1751 def remove(self, ignoremissing=False):
1746 def remove(self, ignoremissing=False):
1752 """wraps unlink for a repo's working directory"""
1747 """wraps unlink for a repo's working directory"""
1753 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1748 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1754
1749
1755 def write(self, data, flags):
1750 def write(self, data, flags):
1756 """wraps repo.wwrite"""
1751 """wraps repo.wwrite"""
1757 self._repo.wwrite(self._path, data, flags)
1752 self._repo.wwrite(self._path, data, flags)
1758
1753
1759 class workingcommitctx(workingctx):
1754 class workingcommitctx(workingctx):
1760 """A workingcommitctx object makes access to data related to
1755 """A workingcommitctx object makes access to data related to
1761 the revision being committed convenient.
1756 the revision being committed convenient.
1762
1757
1763 This hides changes in the working directory, if they aren't
1758 This hides changes in the working directory, if they aren't
1764 committed in this context.
1759 committed in this context.
1765 """
1760 """
1766 def __init__(self, repo, changes,
1761 def __init__(self, repo, changes,
1767 text="", user=None, date=None, extra=None):
1762 text="", user=None, date=None, extra=None):
1768 super(workingctx, self).__init__(repo, text, user, date, extra,
1763 super(workingctx, self).__init__(repo, text, user, date, extra,
1769 changes)
1764 changes)
1770
1765
1771 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1766 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1772 unknown=False):
1767 unknown=False):
1773 """Return matched files only in ``self._status``
1768 """Return matched files only in ``self._status``
1774
1769
1775 Uncommitted files appear "clean" via this context, even if
1770 Uncommitted files appear "clean" via this context, even if
1776 they aren't actually so in the working directory.
1771 they aren't actually so in the working directory.
1777 """
1772 """
1778 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1773 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1779 if clean:
1774 if clean:
1780 clean = [f for f in self._manifest if f not in self._changedset]
1775 clean = [f for f in self._manifest if f not in self._changedset]
1781 else:
1776 else:
1782 clean = []
1777 clean = []
1783 return scmutil.status([f for f in self._status.modified if match(f)],
1778 return scmutil.status([f for f in self._status.modified if match(f)],
1784 [f for f in self._status.added if match(f)],
1779 [f for f in self._status.added if match(f)],
1785 [f for f in self._status.removed if match(f)],
1780 [f for f in self._status.removed if match(f)],
1786 [], [], [], clean)
1781 [], [], [], clean)
1787
1782
1788 @propertycache
1783 @propertycache
1789 def _changedset(self):
1784 def _changedset(self):
1790 """Return the set of files changed in this context
1785 """Return the set of files changed in this context
1791 """
1786 """
1792 changed = set(self._status.modified)
1787 changed = set(self._status.modified)
1793 changed.update(self._status.added)
1788 changed.update(self._status.added)
1794 changed.update(self._status.removed)
1789 changed.update(self._status.removed)
1795 return changed
1790 return changed
1796
1791
1797 def makecachingfilectxfn(func):
1792 def makecachingfilectxfn(func):
1798 """Create a filectxfn that caches based on the path.
1793 """Create a filectxfn that caches based on the path.
1799
1794
1800 We can't use util.cachefunc because it uses all arguments as the cache
1795 We can't use util.cachefunc because it uses all arguments as the cache
1801 key and this creates a cycle since the arguments include the repo and
1796 key and this creates a cycle since the arguments include the repo and
1802 memctx.
1797 memctx.
1803 """
1798 """
1804 cache = {}
1799 cache = {}
1805
1800
1806 def getfilectx(repo, memctx, path):
1801 def getfilectx(repo, memctx, path):
1807 if path not in cache:
1802 if path not in cache:
1808 cache[path] = func(repo, memctx, path)
1803 cache[path] = func(repo, memctx, path)
1809 return cache[path]
1804 return cache[path]
1810
1805
1811 return getfilectx
1806 return getfilectx
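# A hedged usage sketch: wrap an expensive filectxfn so each path is computed
# at most once, however often the commit machinery asks for it. The wrapped
# function and its helper are hypothetical; memfilectx is assumed to accept
# (repo, path, data) as defined later in this module.
#
#   def expensivefilectxfn(repo, memctx, path):
#       data = rebuildcontents(path)  # hypothetical, slow to compute
#       return memfilectx(repo, path, data)
#
#   filectxfn = makecachingfilectxfn(expensivefilectxfn)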
1812
1807
1813 class memctx(committablectx):
1808 class memctx(committablectx):
1814 """Use memctx to perform in-memory commits via localrepo.commitctx().
1809 """Use memctx to perform in-memory commits via localrepo.commitctx().
1815
1810
1816 Revision information is supplied at initialization time, while the
1811 Revision information is supplied at initialization time, while the
1817 related file data is made available through a callback
1812 related file data is made available through a callback
1818 mechanism. 'repo' is the current localrepo, 'parents' is a
1813 mechanism. 'repo' is the current localrepo, 'parents' is a
1819 sequence of two parent revisions identifiers (pass None for every
1814 sequence of two parent revisions identifiers (pass None for every
1820 missing parent), 'text' is the commit message and 'files' lists
1815 missing parent), 'text' is the commit message and 'files' lists
1821 names of files touched by the revision (normalized and relative to
1816 names of files touched by the revision (normalized and relative to
1822 repository root).
1817 repository root).
1823
1818
1824 filectxfn(repo, memctx, path) is a callable receiving the
1819 filectxfn(repo, memctx, path) is a callable receiving the
1825 repository, the current memctx object and the normalized path of
1820 repository, the current memctx object and the normalized path of
1826 requested file, relative to repository root. It is fired by the
1821 requested file, relative to repository root. It is fired by the
1827 commit function for every file in 'files', but calls order is
1822 commit function for every file in 'files', but calls order is
1828 undefined. If the file is available in the revision being
1823 undefined. If the file is available in the revision being
1829 committed (updated or added), filectxfn returns a memfilectx
1824 committed (updated or added), filectxfn returns a memfilectx
1830 object. If the file was removed, filectxfn raises an
1825 object. If the file was removed, filectxfn raises an
1831 IOError. Moved files are represented by marking the source file
1826 IOError. Moved files are represented by marking the source file
1832 removed and the new file added with copy information (see
1827 removed and the new file added with copy information (see
1833 memfilectx).
1828 memfilectx).
1834
1829
1835 user receives the committer name and defaults to current
1830 user receives the committer name and defaults to current
1836 repository username, date is the commit date in any format
1831 repository username, date is the commit date in any format
1837 supported by util.parsedate() and defaults to current date, extra
1832 supported by util.parsedate() and defaults to current date, extra
1838 is a dictionary of metadata or is left empty.
1833 is a dictionary of metadata or is left empty.
1839 """
1834 """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)
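
    # When 'filectxfn' is not callable it is treated as a mapping from path to
    # a file context object (anything exposing data()/islink()/isexec()/
    # renamed(), e.g. filectx objects from another changectx).  A hedged
    # sketch, assuming every listed file exists in the hypothetical source
    # revision 'otherctx':
    #
    #   store = dict((f, otherctx[f]) for f in files)
    #   ctx = memctx(repo, parents, text, files, store)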

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
            if len(p) > 1:
                p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man
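
    # Note (an informal reading of revlog's hashing scheme, not a normative
    # spec): the node recorded in the manifest for each file is the filelog
    # hash of its new data together with its parent filenodes, roughly
    #
    #   node = sha1(min(p1node, p2node) + max(p1node, p2node) + data).digest()
    #
    # so unchanged content with unchanged parents hashes to the same node.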

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used to check for the existence
        # of the 2nd parent, because "memctx._parents" is explicitly
        # initialized from a two-element list, so its length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
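
    # In other words (an informal summary of the loop above): a listed file
    # that neither parent manifest knows about is 'added'; a known file for
    # which filectxfn returns a memfilectx is 'modified'; a known file for
    # which filectxfn returns None (or another falsy value) is 'removed'.
    # With made-up file names:
    #
    #   files = ['new.txt', 'changed.txt', 'gone.txt']
    #   -> added=['new.txt'], modified=['changed.txt'], removed=['gone.txt']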

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data