namespaces: add branches...
Sean Farley
r23563:11499204 default
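This changeset drops the branch-specific lookup from changectx.__init__: once branches are registered in the names interface, repo.names.singlenode() resolves bookmarks, tags and branches alike, so the separate repo.branchtip() try-block becomes redundant (see the reworked lookup around line 410 in the diff below). The following is a minimal sketch of the resulting lookup order, assuming a Mercurial repo object; resolve_symbol is a hypothetical helper written for illustration, not code from the changeset, and the filtered-repository error handling present in the real method is omitted.

    from mercurial import error

    def resolve_symbol(repo, changeid):
        """Sketch: resolve a bookmark, tag or branch name to (node, rev)."""
        try:
            # the unified names interface now covers branches too, so no
            # separate repo.branchtip() fallback is needed after this change
            node = repo.names.singlenode(repo, changeid)
            return node, repo.changelog.rev(node)
        except KeyError:
            pass
        # fall back to partial hash matching, as changectx.__init__ does
        node = repo.unfiltered().changelog._partialmatch(changeid)
        if node is not None:
            return node, repo.changelog.rev(node)
        raise error.RepoLookupError("unknown revision '%s'" % changeid)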
@@ -1,1685 +1,1680 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class basectx(object):
20 class basectx(object):
21 """A basectx object represents the common logic for its children:
21 """A basectx object represents the common logic for its children:
22 changectx: read-only context that is already present in the repo,
22 changectx: read-only context that is already present in the repo,
23 workingctx: a context that represents the working directory and can
23 workingctx: a context that represents the working directory and can
24 be committed,
24 be committed,
25 memctx: a context that represents changes in-memory and can also
25 memctx: a context that represents changes in-memory and can also
26 be committed."""
26 be committed."""
27 def __new__(cls, repo, changeid='', *args, **kwargs):
27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 if isinstance(changeid, basectx):
28 if isinstance(changeid, basectx):
29 return changeid
29 return changeid
30
30
31 o = super(basectx, cls).__new__(cls)
31 o = super(basectx, cls).__new__(cls)
32
32
33 o._repo = repo
33 o._repo = repo
34 o._rev = nullrev
34 o._rev = nullrev
35 o._node = nullid
35 o._node = nullid
36
36
37 return o
37 return o
38
38
39 def __str__(self):
39 def __str__(self):
40 return short(self.node())
40 return short(self.node())
41
41
42 def __int__(self):
42 def __int__(self):
43 return self.rev()
43 return self.rev()
44
44
45 def __repr__(self):
45 def __repr__(self):
46 return "<%s %s>" % (type(self).__name__, str(self))
46 return "<%s %s>" % (type(self).__name__, str(self))
47
47
48 def __eq__(self, other):
48 def __eq__(self, other):
49 try:
49 try:
50 return type(self) == type(other) and self._rev == other._rev
50 return type(self) == type(other) and self._rev == other._rev
51 except AttributeError:
51 except AttributeError:
52 return False
52 return False
53
53
54 def __ne__(self, other):
54 def __ne__(self, other):
55 return not (self == other)
55 return not (self == other)
56
56
57 def __contains__(self, key):
57 def __contains__(self, key):
58 return key in self._manifest
58 return key in self._manifest
59
59
60 def __getitem__(self, key):
60 def __getitem__(self, key):
61 return self.filectx(key)
61 return self.filectx(key)
62
62
63 def __iter__(self):
63 def __iter__(self):
64 for f in sorted(self._manifest):
64 for f in sorted(self._manifest):
65 yield f
65 yield f
66
66
67 def _manifestmatches(self, match, s):
67 def _manifestmatches(self, match, s):
68 """generate a new manifest filtered by the match argument
68 """generate a new manifest filtered by the match argument
69
69
70 This method is for internal use only and mainly exists to provide an
70 This method is for internal use only and mainly exists to provide an
71 object oriented way for other contexts to customize the manifest
71 object oriented way for other contexts to customize the manifest
72 generation.
72 generation.
73 """
73 """
74 return self.manifest().matches(match)
74 return self.manifest().matches(match)
75
75
76 def _matchstatus(self, other, match):
76 def _matchstatus(self, other, match):
77 """return match.always if match is none
77 """return match.always if match is none
78
78
79 This internal method provides a way for child objects to override the
79 This internal method provides a way for child objects to override the
80 match operator.
80 match operator.
81 """
81 """
82 return match or matchmod.always(self._repo.root, self._repo.getcwd())
82 return match or matchmod.always(self._repo.root, self._repo.getcwd())
83
83
84 def _buildstatus(self, other, s, match, listignored, listclean,
84 def _buildstatus(self, other, s, match, listignored, listclean,
85 listunknown):
85 listunknown):
86 """build a status with respect to another context"""
86 """build a status with respect to another context"""
87 # Load earliest manifest first for caching reasons. More specifically,
87 # Load earliest manifest first for caching reasons. More specifically,
88 # if you have revisions 1000 and 1001, 1001 is probably stored as a
88 # if you have revisions 1000 and 1001, 1001 is probably stored as a
89 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
89 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
90 # 1000 and cache it so that when you read 1001, we just need to apply a
90 # 1000 and cache it so that when you read 1001, we just need to apply a
91 # delta to what's in the cache. So that's one full reconstruction + one
91 # delta to what's in the cache. So that's one full reconstruction + one
92 # delta application.
92 # delta application.
93 if self.rev() is not None and self.rev() < other.rev():
93 if self.rev() is not None and self.rev() < other.rev():
94 self.manifest()
94 self.manifest()
95 mf1 = other._manifestmatches(match, s)
95 mf1 = other._manifestmatches(match, s)
96 mf2 = self._manifestmatches(match, s)
96 mf2 = self._manifestmatches(match, s)
97
97
98 modified, added, clean = [], [], []
98 modified, added, clean = [], [], []
99 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
99 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
100 deletedset = set(deleted)
100 deletedset = set(deleted)
101 withflags = mf1.withflags() | mf2.withflags()
101 withflags = mf1.withflags() | mf2.withflags()
102 for fn, mf2node in mf2.iteritems():
102 for fn, mf2node in mf2.iteritems():
103 if fn in mf1:
103 if fn in mf1:
104 if (fn not in deletedset and
104 if (fn not in deletedset and
105 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
105 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
106 (mf1[fn] != mf2node and
106 (mf1[fn] != mf2node and
107 (mf2node or self[fn].cmp(other[fn]))))):
107 (mf2node or self[fn].cmp(other[fn]))))):
108 modified.append(fn)
108 modified.append(fn)
109 elif listclean:
109 elif listclean:
110 clean.append(fn)
110 clean.append(fn)
111 del mf1[fn]
111 del mf1[fn]
112 elif fn not in deletedset:
112 elif fn not in deletedset:
113 added.append(fn)
113 added.append(fn)
114 removed = mf1.keys()
114 removed = mf1.keys()
115 if removed:
115 if removed:
116 # need to filter files if they are already reported as removed
116 # need to filter files if they are already reported as removed
117 unknown = [fn for fn in unknown if fn not in mf1]
117 unknown = [fn for fn in unknown if fn not in mf1]
118 ignored = [fn for fn in ignored if fn not in mf1]
118 ignored = [fn for fn in ignored if fn not in mf1]
119
119
120 return scmutil.status(modified, added, removed, deleted, unknown,
120 return scmutil.status(modified, added, removed, deleted, unknown,
121 ignored, clean)
121 ignored, clean)
122
122
123 @propertycache
123 @propertycache
124 def substate(self):
124 def substate(self):
125 return subrepo.state(self, self._repo.ui)
125 return subrepo.state(self, self._repo.ui)
126
126
127 def subrev(self, subpath):
127 def subrev(self, subpath):
128 return self.substate[subpath][1]
128 return self.substate[subpath][1]
129
129
130 def rev(self):
130 def rev(self):
131 return self._rev
131 return self._rev
132 def node(self):
132 def node(self):
133 return self._node
133 return self._node
134 def hex(self):
134 def hex(self):
135 return hex(self.node())
135 return hex(self.node())
136 def manifest(self):
136 def manifest(self):
137 return self._manifest
137 return self._manifest
138 def phasestr(self):
138 def phasestr(self):
139 return phases.phasenames[self.phase()]
139 return phases.phasenames[self.phase()]
140 def mutable(self):
140 def mutable(self):
141 return self.phase() > phases.public
141 return self.phase() > phases.public
142
142
143 def getfileset(self, expr):
143 def getfileset(self, expr):
144 return fileset.getfileset(self, expr)
144 return fileset.getfileset(self, expr)
145
145
146 def obsolete(self):
146 def obsolete(self):
147 """True if the changeset is obsolete"""
147 """True if the changeset is obsolete"""
148 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
148 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
149
149
150 def extinct(self):
150 def extinct(self):
151 """True if the changeset is extinct"""
151 """True if the changeset is extinct"""
152 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
152 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
153
153
154 def unstable(self):
154 def unstable(self):
155 """True if the changeset is not obsolete but it's ancestor are"""
155 """True if the changeset is not obsolete but it's ancestor are"""
156 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
156 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
157
157
158 def bumped(self):
158 def bumped(self):
159 """True if the changeset try to be a successor of a public changeset
159 """True if the changeset try to be a successor of a public changeset
160
160
161 Only non-public and non-obsolete changesets may be bumped.
161 Only non-public and non-obsolete changesets may be bumped.
162 """
162 """
163 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
163 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
164
164
165 def divergent(self):
165 def divergent(self):
166 """Is a successors of a changeset with multiple possible successors set
166 """Is a successors of a changeset with multiple possible successors set
167
167
168 Only non-public and non-obsolete changesets may be divergent.
168 Only non-public and non-obsolete changesets may be divergent.
169 """
169 """
170 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
170 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
171
171
172 def troubled(self):
172 def troubled(self):
173 """True if the changeset is either unstable, bumped or divergent"""
173 """True if the changeset is either unstable, bumped or divergent"""
174 return self.unstable() or self.bumped() or self.divergent()
174 return self.unstable() or self.bumped() or self.divergent()
175
175
176 def troubles(self):
176 def troubles(self):
177 """return the list of troubles affecting this changesets.
177 """return the list of troubles affecting this changesets.
178
178
179 Troubles are returned as strings. possible values are:
179 Troubles are returned as strings. possible values are:
180 - unstable,
180 - unstable,
181 - bumped,
181 - bumped,
182 - divergent.
182 - divergent.
183 """
183 """
184 troubles = []
184 troubles = []
185 if self.unstable():
185 if self.unstable():
186 troubles.append('unstable')
186 troubles.append('unstable')
187 if self.bumped():
187 if self.bumped():
188 troubles.append('bumped')
188 troubles.append('bumped')
189 if self.divergent():
189 if self.divergent():
190 troubles.append('divergent')
190 troubles.append('divergent')
191 return troubles
191 return troubles
192
192
193 def parents(self):
193 def parents(self):
194 """return contexts for each parent changeset"""
194 """return contexts for each parent changeset"""
195 return self._parents
195 return self._parents
196
196
197 def p1(self):
197 def p1(self):
198 return self._parents[0]
198 return self._parents[0]
199
199
200 def p2(self):
200 def p2(self):
201 if len(self._parents) == 2:
201 if len(self._parents) == 2:
202 return self._parents[1]
202 return self._parents[1]
203 return changectx(self._repo, -1)
203 return changectx(self._repo, -1)
204
204
205 def _fileinfo(self, path):
205 def _fileinfo(self, path):
206 if '_manifest' in self.__dict__:
206 if '_manifest' in self.__dict__:
207 try:
207 try:
208 return self._manifest[path], self._manifest.flags(path)
208 return self._manifest[path], self._manifest.flags(path)
209 except KeyError:
209 except KeyError:
210 raise error.ManifestLookupError(self._node, path,
210 raise error.ManifestLookupError(self._node, path,
211 _('not found in manifest'))
211 _('not found in manifest'))
212 if '_manifestdelta' in self.__dict__ or path in self.files():
212 if '_manifestdelta' in self.__dict__ or path in self.files():
213 if path in self._manifestdelta:
213 if path in self._manifestdelta:
214 return (self._manifestdelta[path],
214 return (self._manifestdelta[path],
215 self._manifestdelta.flags(path))
215 self._manifestdelta.flags(path))
216 node, flag = self._repo.manifest.find(self._changeset[0], path)
216 node, flag = self._repo.manifest.find(self._changeset[0], path)
217 if not node:
217 if not node:
218 raise error.ManifestLookupError(self._node, path,
218 raise error.ManifestLookupError(self._node, path,
219 _('not found in manifest'))
219 _('not found in manifest'))
220
220
221 return node, flag
221 return node, flag
222
222
223 def filenode(self, path):
223 def filenode(self, path):
224 return self._fileinfo(path)[0]
224 return self._fileinfo(path)[0]
225
225
226 def flags(self, path):
226 def flags(self, path):
227 try:
227 try:
228 return self._fileinfo(path)[1]
228 return self._fileinfo(path)[1]
229 except error.LookupError:
229 except error.LookupError:
230 return ''
230 return ''
231
231
232 def sub(self, path):
232 def sub(self, path):
233 return subrepo.subrepo(self, path)
233 return subrepo.subrepo(self, path)
234
234
235 def match(self, pats=[], include=None, exclude=None, default='glob'):
235 def match(self, pats=[], include=None, exclude=None, default='glob'):
236 r = self._repo
236 r = self._repo
237 return matchmod.match(r.root, r.getcwd(), pats,
237 return matchmod.match(r.root, r.getcwd(), pats,
238 include, exclude, default,
238 include, exclude, default,
239 auditor=r.auditor, ctx=self)
239 auditor=r.auditor, ctx=self)
240
240
241 def diff(self, ctx2=None, match=None, **opts):
241 def diff(self, ctx2=None, match=None, **opts):
242 """Returns a diff generator for the given contexts and matcher"""
242 """Returns a diff generator for the given contexts and matcher"""
243 if ctx2 is None:
243 if ctx2 is None:
244 ctx2 = self.p1()
244 ctx2 = self.p1()
245 if ctx2 is not None:
245 if ctx2 is not None:
246 ctx2 = self._repo[ctx2]
246 ctx2 = self._repo[ctx2]
247 diffopts = patch.diffopts(self._repo.ui, opts)
247 diffopts = patch.diffopts(self._repo.ui, opts)
248 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
248 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
249
249
250 @propertycache
250 @propertycache
251 def _dirs(self):
251 def _dirs(self):
252 return scmutil.dirs(self._manifest)
252 return scmutil.dirs(self._manifest)
253
253
254 def dirs(self):
254 def dirs(self):
255 return self._dirs
255 return self._dirs
256
256
257 def dirty(self, missing=False, merge=True, branch=True):
257 def dirty(self, missing=False, merge=True, branch=True):
258 return False
258 return False
259
259
260 def status(self, other=None, match=None, listignored=False,
260 def status(self, other=None, match=None, listignored=False,
261 listclean=False, listunknown=False, listsubrepos=False):
261 listclean=False, listunknown=False, listsubrepos=False):
262 """return status of files between two nodes or node and working
262 """return status of files between two nodes or node and working
263 directory.
263 directory.
264
264
265 If other is None, compare this node with working directory.
265 If other is None, compare this node with working directory.
266
266
267 returns (modified, added, removed, deleted, unknown, ignored, clean)
267 returns (modified, added, removed, deleted, unknown, ignored, clean)
268 """
268 """
269
269
270 ctx1 = self
270 ctx1 = self
271 ctx2 = self._repo[other]
271 ctx2 = self._repo[other]
272
272
273 # This next code block is, admittedly, fragile logic that tests for
273 # This next code block is, admittedly, fragile logic that tests for
274 # reversing the contexts and wouldn't need to exist if it weren't for
274 # reversing the contexts and wouldn't need to exist if it weren't for
275 # the fast (and common) code path of comparing the working directory
275 # the fast (and common) code path of comparing the working directory
276 # with its first parent.
276 # with its first parent.
277 #
277 #
278 # What we're aiming for here is the ability to call:
278 # What we're aiming for here is the ability to call:
279 #
279 #
280 # workingctx.status(parentctx)
280 # workingctx.status(parentctx)
281 #
281 #
282 # If we always built the manifest for each context and compared those,
282 # If we always built the manifest for each context and compared those,
283 # then we'd be done. But the special case of the above call means we
283 # then we'd be done. But the special case of the above call means we
284 # just copy the manifest of the parent.
284 # just copy the manifest of the parent.
285 reversed = False
285 reversed = False
286 if (not isinstance(ctx1, changectx)
286 if (not isinstance(ctx1, changectx)
287 and isinstance(ctx2, changectx)):
287 and isinstance(ctx2, changectx)):
288 reversed = True
288 reversed = True
289 ctx1, ctx2 = ctx2, ctx1
289 ctx1, ctx2 = ctx2, ctx1
290
290
291 match = ctx2._matchstatus(ctx1, match)
291 match = ctx2._matchstatus(ctx1, match)
292 r = scmutil.status([], [], [], [], [], [], [])
292 r = scmutil.status([], [], [], [], [], [], [])
293 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
293 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
294 listunknown)
294 listunknown)
295
295
296 if reversed:
296 if reversed:
297 # Reverse added and removed. Clear deleted, unknown and ignored as
297 # Reverse added and removed. Clear deleted, unknown and ignored as
298 # these make no sense to reverse.
298 # these make no sense to reverse.
299 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
299 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
300 r.clean)
300 r.clean)
301
301
302 if listsubrepos:
302 if listsubrepos:
303 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
303 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
304 rev2 = ctx2.subrev(subpath)
304 rev2 = ctx2.subrev(subpath)
305 try:
305 try:
306 submatch = matchmod.narrowmatcher(subpath, match)
306 submatch = matchmod.narrowmatcher(subpath, match)
307 s = sub.status(rev2, match=submatch, ignored=listignored,
307 s = sub.status(rev2, match=submatch, ignored=listignored,
308 clean=listclean, unknown=listunknown,
308 clean=listclean, unknown=listunknown,
309 listsubrepos=True)
309 listsubrepos=True)
310 for rfiles, sfiles in zip(r, s):
310 for rfiles, sfiles in zip(r, s):
311 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
311 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
312 except error.LookupError:
312 except error.LookupError:
313 self._repo.ui.status(_("skipping missing "
313 self._repo.ui.status(_("skipping missing "
314 "subrepository: %s\n") % subpath)
314 "subrepository: %s\n") % subpath)
315
315
316 for l in r:
316 for l in r:
317 l.sort()
317 l.sort()
318
318
319 return r
319 return r
320
320
321
321
322 def makememctx(repo, parents, text, user, date, branch, files, store,
322 def makememctx(repo, parents, text, user, date, branch, files, store,
323 editor=None):
323 editor=None):
324 def getfilectx(repo, memctx, path):
324 def getfilectx(repo, memctx, path):
325 data, mode, copied = store.getfile(path)
325 data, mode, copied = store.getfile(path)
326 if data is None:
326 if data is None:
327 return None
327 return None
328 islink, isexec = mode
328 islink, isexec = mode
329 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
329 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
330 copied=copied, memctx=memctx)
330 copied=copied, memctx=memctx)
331 extra = {}
331 extra = {}
332 if branch:
332 if branch:
333 extra['branch'] = encoding.fromlocal(branch)
333 extra['branch'] = encoding.fromlocal(branch)
334 ctx = memctx(repo, parents, text, files, getfilectx, user,
334 ctx = memctx(repo, parents, text, files, getfilectx, user,
335 date, extra, editor)
335 date, extra, editor)
336 return ctx
336 return ctx
337
337
338 class changectx(basectx):
338 class changectx(basectx):
339 """A changecontext object makes access to data related to a particular
339 """A changecontext object makes access to data related to a particular
340 changeset convenient. It represents a read-only context already present in
340 changeset convenient. It represents a read-only context already present in
341 the repo."""
341 the repo."""
342 def __init__(self, repo, changeid=''):
342 def __init__(self, repo, changeid=''):
343 """changeid is a revision number, node, or tag"""
343 """changeid is a revision number, node, or tag"""
344
344
345 # since basectx.__new__ already took care of copying the object, we
345 # since basectx.__new__ already took care of copying the object, we
346 # don't need to do anything in __init__, so we just exit here
346 # don't need to do anything in __init__, so we just exit here
347 if isinstance(changeid, basectx):
347 if isinstance(changeid, basectx):
348 return
348 return
349
349
350 if changeid == '':
350 if changeid == '':
351 changeid = '.'
351 changeid = '.'
352 self._repo = repo
352 self._repo = repo
353
353
354 try:
354 try:
355 if isinstance(changeid, int):
355 if isinstance(changeid, int):
356 self._node = repo.changelog.node(changeid)
356 self._node = repo.changelog.node(changeid)
357 self._rev = changeid
357 self._rev = changeid
358 return
358 return
359 if isinstance(changeid, long):
359 if isinstance(changeid, long):
360 changeid = str(changeid)
360 changeid = str(changeid)
361 if changeid == '.':
361 if changeid == '.':
362 self._node = repo.dirstate.p1()
362 self._node = repo.dirstate.p1()
363 self._rev = repo.changelog.rev(self._node)
363 self._rev = repo.changelog.rev(self._node)
364 return
364 return
365 if changeid == 'null':
365 if changeid == 'null':
366 self._node = nullid
366 self._node = nullid
367 self._rev = nullrev
367 self._rev = nullrev
368 return
368 return
369 if changeid == 'tip':
369 if changeid == 'tip':
370 self._node = repo.changelog.tip()
370 self._node = repo.changelog.tip()
371 self._rev = repo.changelog.rev(self._node)
371 self._rev = repo.changelog.rev(self._node)
372 return
372 return
373 if len(changeid) == 20:
373 if len(changeid) == 20:
374 try:
374 try:
375 self._node = changeid
375 self._node = changeid
376 self._rev = repo.changelog.rev(changeid)
376 self._rev = repo.changelog.rev(changeid)
377 return
377 return
378 except error.FilteredRepoLookupError:
378 except error.FilteredRepoLookupError:
379 raise
379 raise
380 except LookupError:
380 except LookupError:
381 pass
381 pass
382
382
383 try:
383 try:
384 r = int(changeid)
384 r = int(changeid)
385 if str(r) != changeid:
385 if str(r) != changeid:
386 raise ValueError
386 raise ValueError
387 l = len(repo.changelog)
387 l = len(repo.changelog)
388 if r < 0:
388 if r < 0:
389 r += l
389 r += l
390 if r < 0 or r >= l:
390 if r < 0 or r >= l:
391 raise ValueError
391 raise ValueError
392 self._rev = r
392 self._rev = r
393 self._node = repo.changelog.node(r)
393 self._node = repo.changelog.node(r)
394 return
394 return
395 except error.FilteredIndexError:
395 except error.FilteredIndexError:
396 raise
396 raise
397 except (ValueError, OverflowError, IndexError):
397 except (ValueError, OverflowError, IndexError):
398 pass
398 pass
399
399
400 if len(changeid) == 40:
400 if len(changeid) == 40:
401 try:
401 try:
402 self._node = bin(changeid)
402 self._node = bin(changeid)
403 self._rev = repo.changelog.rev(self._node)
403 self._rev = repo.changelog.rev(self._node)
404 return
404 return
405 except error.FilteredLookupError:
405 except error.FilteredLookupError:
406 raise
406 raise
407 except (TypeError, LookupError):
407 except (TypeError, LookupError):
408 pass
408 pass
409
409
410 410             # lookup bookmarks through the name interface
411 411             try:
412 412                 self._node = repo.names.singlenode(repo, changeid)
413 413                 self._rev = repo.changelog.rev(self._node)
414 414                 return
415 415             except KeyError:
416 416                 pass
417   -
418   -             try:
419   -                 self._node = repo.branchtip(changeid)
420   -                 self._rev = repo.changelog.rev(self._node)
421   -                 return
422 417             except error.FilteredRepoLookupError:
423 418                 raise
424 419             except error.RepoLookupError:
425 420                 pass
426 421
427 422             self._node = repo.unfiltered().changelog._partialmatch(changeid)
428 423             if self._node is not None:
429 424                 self._rev = repo.changelog.rev(self._node)
430 425                 return
431 426
432 427             # lookup failed
433 428             # check if it might have come from damaged dirstate
434 429             #
435 430             # XXX we could avoid the unfiltered if we had a recognizable
436 431             # exception for filtered changeset access
437 432             if changeid in repo.unfiltered().dirstate.parents():
438 433                 msg = _("working directory has unknown parent '%s'!")
439 434                 raise error.Abort(msg % short(changeid))
440 435             try:
441 436                 if len(changeid) == 20:
442 437                     changeid = hex(changeid)
443 438             except TypeError:
444 439                 pass
445 440         except (error.FilteredIndexError, error.FilteredLookupError,
446 441                 error.FilteredRepoLookupError):
447 442             if repo.filtername == 'visible':
448 443                 msg = _("hidden revision '%s'") % changeid
449 444                 hint = _('use --hidden to access hidden revisions')
450 445                 raise error.FilteredRepoLookupError(msg, hint=hint)
451 446             msg = _("filtered revision '%s' (not in '%s' subset)")
452 447             msg %= (changeid, repo.filtername)
453 448             raise error.FilteredRepoLookupError(msg)
454 449         except IndexError:
455 450             pass
456 451         raise error.RepoLookupError(
457 452             _("unknown revision '%s'") % changeid)
458
453
459 def __hash__(self):
454 def __hash__(self):
460 try:
455 try:
461 return hash(self._rev)
456 return hash(self._rev)
462 except AttributeError:
457 except AttributeError:
463 return id(self)
458 return id(self)
464
459
465 def __nonzero__(self):
460 def __nonzero__(self):
466 return self._rev != nullrev
461 return self._rev != nullrev
467
462
468 @propertycache
463 @propertycache
469 def _changeset(self):
464 def _changeset(self):
470 return self._repo.changelog.read(self.rev())
465 return self._repo.changelog.read(self.rev())
471
466
472 @propertycache
467 @propertycache
473 def _manifest(self):
468 def _manifest(self):
474 return self._repo.manifest.read(self._changeset[0])
469 return self._repo.manifest.read(self._changeset[0])
475
470
476 @propertycache
471 @propertycache
477 def _manifestdelta(self):
472 def _manifestdelta(self):
478 return self._repo.manifest.readdelta(self._changeset[0])
473 return self._repo.manifest.readdelta(self._changeset[0])
479
474
480 @propertycache
475 @propertycache
481 def _parents(self):
476 def _parents(self):
482 p = self._repo.changelog.parentrevs(self._rev)
477 p = self._repo.changelog.parentrevs(self._rev)
483 if p[1] == nullrev:
478 if p[1] == nullrev:
484 p = p[:-1]
479 p = p[:-1]
485 return [changectx(self._repo, x) for x in p]
480 return [changectx(self._repo, x) for x in p]
486
481
487 def changeset(self):
482 def changeset(self):
488 return self._changeset
483 return self._changeset
489 def manifestnode(self):
484 def manifestnode(self):
490 return self._changeset[0]
485 return self._changeset[0]
491
486
492 def user(self):
487 def user(self):
493 return self._changeset[1]
488 return self._changeset[1]
494 def date(self):
489 def date(self):
495 return self._changeset[2]
490 return self._changeset[2]
496 def files(self):
491 def files(self):
497 return self._changeset[3]
492 return self._changeset[3]
498 def description(self):
493 def description(self):
499 return self._changeset[4]
494 return self._changeset[4]
500 def branch(self):
495 def branch(self):
501 return encoding.tolocal(self._changeset[5].get("branch"))
496 return encoding.tolocal(self._changeset[5].get("branch"))
502 def closesbranch(self):
497 def closesbranch(self):
503 return 'close' in self._changeset[5]
498 return 'close' in self._changeset[5]
504 def extra(self):
499 def extra(self):
505 return self._changeset[5]
500 return self._changeset[5]
506 def tags(self):
501 def tags(self):
507 return self._repo.nodetags(self._node)
502 return self._repo.nodetags(self._node)
508 def bookmarks(self):
503 def bookmarks(self):
509 return self._repo.nodebookmarks(self._node)
504 return self._repo.nodebookmarks(self._node)
510 def phase(self):
505 def phase(self):
511 return self._repo._phasecache.phase(self._repo, self._rev)
506 return self._repo._phasecache.phase(self._repo, self._rev)
512 def hidden(self):
507 def hidden(self):
513 return self._rev in repoview.filterrevs(self._repo, 'visible')
508 return self._rev in repoview.filterrevs(self._repo, 'visible')
514
509
515 def children(self):
510 def children(self):
516 """return contexts for each child changeset"""
511 """return contexts for each child changeset"""
517 c = self._repo.changelog.children(self._node)
512 c = self._repo.changelog.children(self._node)
518 return [changectx(self._repo, x) for x in c]
513 return [changectx(self._repo, x) for x in c]
519
514
520 def ancestors(self):
515 def ancestors(self):
521 for a in self._repo.changelog.ancestors([self._rev]):
516 for a in self._repo.changelog.ancestors([self._rev]):
522 yield changectx(self._repo, a)
517 yield changectx(self._repo, a)
523
518
524 def descendants(self):
519 def descendants(self):
525 for d in self._repo.changelog.descendants([self._rev]):
520 for d in self._repo.changelog.descendants([self._rev]):
526 yield changectx(self._repo, d)
521 yield changectx(self._repo, d)
527
522
528 def filectx(self, path, fileid=None, filelog=None):
523 def filectx(self, path, fileid=None, filelog=None):
529 """get a file context from this changeset"""
524 """get a file context from this changeset"""
530 if fileid is None:
525 if fileid is None:
531 fileid = self.filenode(path)
526 fileid = self.filenode(path)
532 return filectx(self._repo, path, fileid=fileid,
527 return filectx(self._repo, path, fileid=fileid,
533 changectx=self, filelog=filelog)
528 changectx=self, filelog=filelog)
534
529
535 def ancestor(self, c2, warn=False):
530 def ancestor(self, c2, warn=False):
536 """return the "best" ancestor context of self and c2
531 """return the "best" ancestor context of self and c2
537
532
538 If there are multiple candidates, it will show a message and check
533 If there are multiple candidates, it will show a message and check
539 merge.preferancestor configuration before falling back to the
534 merge.preferancestor configuration before falling back to the
540 revlog ancestor."""
535 revlog ancestor."""
541 # deal with workingctxs
536 # deal with workingctxs
542 n2 = c2._node
537 n2 = c2._node
543 if n2 is None:
538 if n2 is None:
544 n2 = c2._parents[0]._node
539 n2 = c2._parents[0]._node
545 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
540 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
546 if not cahs:
541 if not cahs:
547 anc = nullid
542 anc = nullid
548 elif len(cahs) == 1:
543 elif len(cahs) == 1:
549 anc = cahs[0]
544 anc = cahs[0]
550 else:
545 else:
551 for r in self._repo.ui.configlist('merge', 'preferancestor'):
546 for r in self._repo.ui.configlist('merge', 'preferancestor'):
552 try:
547 try:
553 ctx = changectx(self._repo, r)
548 ctx = changectx(self._repo, r)
554 except error.RepoLookupError:
549 except error.RepoLookupError:
555 continue
550 continue
556 anc = ctx.node()
551 anc = ctx.node()
557 if anc in cahs:
552 if anc in cahs:
558 break
553 break
559 else:
554 else:
560 anc = self._repo.changelog.ancestor(self._node, n2)
555 anc = self._repo.changelog.ancestor(self._node, n2)
561 if warn:
556 if warn:
562 self._repo.ui.status(
557 self._repo.ui.status(
563 (_("note: using %s as ancestor of %s and %s\n") %
558 (_("note: using %s as ancestor of %s and %s\n") %
564 (short(anc), short(self._node), short(n2))) +
559 (short(anc), short(self._node), short(n2))) +
565 ''.join(_(" alternatively, use --config "
560 ''.join(_(" alternatively, use --config "
566 "merge.preferancestor=%s\n") %
561 "merge.preferancestor=%s\n") %
567 short(n) for n in sorted(cahs) if n != anc))
562 short(n) for n in sorted(cahs) if n != anc))
568 return changectx(self._repo, anc)
563 return changectx(self._repo, anc)
569
564
570 def descendant(self, other):
565 def descendant(self, other):
571 """True if other is descendant of this changeset"""
566 """True if other is descendant of this changeset"""
572 return self._repo.changelog.descendant(self._rev, other._rev)
567 return self._repo.changelog.descendant(self._rev, other._rev)
573
568
574 def walk(self, match):
569 def walk(self, match):
575 fset = set(match.files())
570 fset = set(match.files())
576 # for dirstate.walk, files=['.'] means "walk the whole tree".
571 # for dirstate.walk, files=['.'] means "walk the whole tree".
577 # follow that here, too
572 # follow that here, too
578 fset.discard('.')
573 fset.discard('.')
579
574
580 # avoid the entire walk if we're only looking for specific files
575 # avoid the entire walk if we're only looking for specific files
581 if fset and not match.anypats():
576 if fset and not match.anypats():
582 if util.all([fn in self for fn in fset]):
577 if util.all([fn in self for fn in fset]):
583 for fn in sorted(fset):
578 for fn in sorted(fset):
584 if match(fn):
579 if match(fn):
585 yield fn
580 yield fn
586 raise StopIteration
581 raise StopIteration
587
582
588 for fn in self:
583 for fn in self:
589 if fn in fset:
584 if fn in fset:
590 # specified pattern is the exact name
585 # specified pattern is the exact name
591 fset.remove(fn)
586 fset.remove(fn)
592 if match(fn):
587 if match(fn):
593 yield fn
588 yield fn
594 for fn in sorted(fset):
589 for fn in sorted(fset):
595 if fn in self._dirs:
590 if fn in self._dirs:
596 # specified pattern is a directory
591 # specified pattern is a directory
597 continue
592 continue
598 match.bad(fn, _('no such file in rev %s') % self)
593 match.bad(fn, _('no such file in rev %s') % self)
599
594
600 def matches(self, match):
595 def matches(self, match):
601 return self.walk(match)
596 return self.walk(match)
602
597
603 class basefilectx(object):
598 class basefilectx(object):
604 """A filecontext object represents the common logic for its children:
599 """A filecontext object represents the common logic for its children:
605 filectx: read-only access to a filerevision that is already present
600 filectx: read-only access to a filerevision that is already present
606 in the repo,
601 in the repo,
607 workingfilectx: a filecontext that represents files from the working
602 workingfilectx: a filecontext that represents files from the working
608 directory,
603 directory,
609 memfilectx: a filecontext that represents files in-memory."""
604 memfilectx: a filecontext that represents files in-memory."""
610 def __new__(cls, repo, path, *args, **kwargs):
605 def __new__(cls, repo, path, *args, **kwargs):
611 return super(basefilectx, cls).__new__(cls)
606 return super(basefilectx, cls).__new__(cls)
612
607
613 @propertycache
608 @propertycache
614 def _filelog(self):
609 def _filelog(self):
615 return self._repo.file(self._path)
610 return self._repo.file(self._path)
616
611
617 @propertycache
612 @propertycache
618 def _changeid(self):
613 def _changeid(self):
619 if '_changeid' in self.__dict__:
614 if '_changeid' in self.__dict__:
620 return self._changeid
615 return self._changeid
621 elif '_changectx' in self.__dict__:
616 elif '_changectx' in self.__dict__:
622 return self._changectx.rev()
617 return self._changectx.rev()
623 else:
618 else:
624 return self._filelog.linkrev(self._filerev)
619 return self._filelog.linkrev(self._filerev)
625
620
626 @propertycache
621 @propertycache
627 def _filenode(self):
622 def _filenode(self):
628 if '_fileid' in self.__dict__:
623 if '_fileid' in self.__dict__:
629 return self._filelog.lookup(self._fileid)
624 return self._filelog.lookup(self._fileid)
630 else:
625 else:
631 return self._changectx.filenode(self._path)
626 return self._changectx.filenode(self._path)
632
627
633 @propertycache
628 @propertycache
634 def _filerev(self):
629 def _filerev(self):
635 return self._filelog.rev(self._filenode)
630 return self._filelog.rev(self._filenode)
636
631
637 @propertycache
632 @propertycache
638 def _repopath(self):
633 def _repopath(self):
639 return self._path
634 return self._path
640
635
641 def __nonzero__(self):
636 def __nonzero__(self):
642 try:
637 try:
643 self._filenode
638 self._filenode
644 return True
639 return True
645 except error.LookupError:
640 except error.LookupError:
646 # file is missing
641 # file is missing
647 return False
642 return False
648
643
649 def __str__(self):
644 def __str__(self):
650 return "%s@%s" % (self.path(), self._changectx)
645 return "%s@%s" % (self.path(), self._changectx)
651
646
652 def __repr__(self):
647 def __repr__(self):
653 return "<%s %s>" % (type(self).__name__, str(self))
648 return "<%s %s>" % (type(self).__name__, str(self))
654
649
655 def __hash__(self):
650 def __hash__(self):
656 try:
651 try:
657 return hash((self._path, self._filenode))
652 return hash((self._path, self._filenode))
658 except AttributeError:
653 except AttributeError:
659 return id(self)
654 return id(self)
660
655
661 def __eq__(self, other):
656 def __eq__(self, other):
662 try:
657 try:
663 return (type(self) == type(other) and self._path == other._path
658 return (type(self) == type(other) and self._path == other._path
664 and self._filenode == other._filenode)
659 and self._filenode == other._filenode)
665 except AttributeError:
660 except AttributeError:
666 return False
661 return False
667
662
668 def __ne__(self, other):
663 def __ne__(self, other):
669 return not (self == other)
664 return not (self == other)
670
665
671 def filerev(self):
666 def filerev(self):
672 return self._filerev
667 return self._filerev
673 def filenode(self):
668 def filenode(self):
674 return self._filenode
669 return self._filenode
675 def flags(self):
670 def flags(self):
676 return self._changectx.flags(self._path)
671 return self._changectx.flags(self._path)
677 def filelog(self):
672 def filelog(self):
678 return self._filelog
673 return self._filelog
679 def rev(self):
674 def rev(self):
680 return self._changeid
675 return self._changeid
681 def linkrev(self):
676 def linkrev(self):
682 return self._filelog.linkrev(self._filerev)
677 return self._filelog.linkrev(self._filerev)
683 def node(self):
678 def node(self):
684 return self._changectx.node()
679 return self._changectx.node()
685 def hex(self):
680 def hex(self):
686 return self._changectx.hex()
681 return self._changectx.hex()
687 def user(self):
682 def user(self):
688 return self._changectx.user()
683 return self._changectx.user()
689 def date(self):
684 def date(self):
690 return self._changectx.date()
685 return self._changectx.date()
691 def files(self):
686 def files(self):
692 return self._changectx.files()
687 return self._changectx.files()
693 def description(self):
688 def description(self):
694 return self._changectx.description()
689 return self._changectx.description()
695 def branch(self):
690 def branch(self):
696 return self._changectx.branch()
691 return self._changectx.branch()
697 def extra(self):
692 def extra(self):
698 return self._changectx.extra()
693 return self._changectx.extra()
699 def phase(self):
694 def phase(self):
700 return self._changectx.phase()
695 return self._changectx.phase()
701 def phasestr(self):
696 def phasestr(self):
702 return self._changectx.phasestr()
697 return self._changectx.phasestr()
703 def manifest(self):
698 def manifest(self):
704 return self._changectx.manifest()
699 return self._changectx.manifest()
705 def changectx(self):
700 def changectx(self):
706 return self._changectx
701 return self._changectx
707
702
708 def path(self):
703 def path(self):
709 return self._path
704 return self._path
710
705
711 def isbinary(self):
706 def isbinary(self):
712 try:
707 try:
713 return util.binary(self.data())
708 return util.binary(self.data())
714 except IOError:
709 except IOError:
715 return False
710 return False
716 def isexec(self):
711 def isexec(self):
717 return 'x' in self.flags()
712 return 'x' in self.flags()
718 def islink(self):
713 def islink(self):
719 return 'l' in self.flags()
714 return 'l' in self.flags()
720
715
721 def cmp(self, fctx):
716 def cmp(self, fctx):
722 """compare with other file context
717 """compare with other file context
723
718
724 returns True if different than fctx.
719 returns True if different than fctx.
725 """
720 """
726 if (fctx._filerev is None
721 if (fctx._filerev is None
727 and (self._repo._encodefilterpats
722 and (self._repo._encodefilterpats
728 # if file data starts with '\1\n', empty metadata block is
723 # if file data starts with '\1\n', empty metadata block is
729 # prepended, which adds 4 bytes to filelog.size().
724 # prepended, which adds 4 bytes to filelog.size().
730 or self.size() - 4 == fctx.size())
725 or self.size() - 4 == fctx.size())
731 or self.size() == fctx.size()):
726 or self.size() == fctx.size()):
732 return self._filelog.cmp(self._filenode, fctx.data())
727 return self._filelog.cmp(self._filenode, fctx.data())
733
728
734 return True
729 return True
735
730
736 def parents(self):
731 def parents(self):
737 _path = self._path
732 _path = self._path
738 fl = self._filelog
733 fl = self._filelog
739 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
734 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
740
735
741 r = self._filelog.renamed(self._filenode)
736 r = self._filelog.renamed(self._filenode)
742 if r:
737 if r:
743 pl[0] = (r[0], r[1], None)
738 pl[0] = (r[0], r[1], None)
744
739
745 return [filectx(self._repo, p, fileid=n, filelog=l)
740 return [filectx(self._repo, p, fileid=n, filelog=l)
746 for p, n, l in pl if n != nullid]
741 for p, n, l in pl if n != nullid]
747
742
748 def p1(self):
743 def p1(self):
749 return self.parents()[0]
744 return self.parents()[0]
750
745
751 def p2(self):
746 def p2(self):
752 p = self.parents()
747 p = self.parents()
753 if len(p) == 2:
748 if len(p) == 2:
754 return p[1]
749 return p[1]
755 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
750 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
756
751
757 def annotate(self, follow=False, linenumber=None, diffopts=None):
752 def annotate(self, follow=False, linenumber=None, diffopts=None):
758 '''returns a list of tuples of (ctx, line) for each line
753 '''returns a list of tuples of (ctx, line) for each line
759 in the file, where ctx is the filectx of the node where
754 in the file, where ctx is the filectx of the node where
760 that line was last changed.
755 that line was last changed.
761 This returns tuples of ((ctx, linenumber), line) for each line,
756 This returns tuples of ((ctx, linenumber), line) for each line,
762 if "linenumber" parameter is NOT "None".
757 if "linenumber" parameter is NOT "None".
763 In such tuples, linenumber means one at the first appearance
758 In such tuples, linenumber means one at the first appearance
764 in the managed file.
759 in the managed file.
765 To reduce annotation cost,
760 To reduce annotation cost,
766 this returns fixed value(False is used) as linenumber,
761 this returns fixed value(False is used) as linenumber,
767 if "linenumber" parameter is "False".'''
762 if "linenumber" parameter is "False".'''
768
763
769 if linenumber is None:
764 if linenumber is None:
770 def decorate(text, rev):
765 def decorate(text, rev):
771 return ([rev] * len(text.splitlines()), text)
766 return ([rev] * len(text.splitlines()), text)
772 elif linenumber:
767 elif linenumber:
773 def decorate(text, rev):
768 def decorate(text, rev):
774 size = len(text.splitlines())
769 size = len(text.splitlines())
775 return ([(rev, i) for i in xrange(1, size + 1)], text)
770 return ([(rev, i) for i in xrange(1, size + 1)], text)
776 else:
771 else:
777 def decorate(text, rev):
772 def decorate(text, rev):
778 return ([(rev, False)] * len(text.splitlines()), text)
773 return ([(rev, False)] * len(text.splitlines()), text)
779
774
780 def pair(parent, child):
775 def pair(parent, child):
781 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
776 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
782 refine=True)
777 refine=True)
783 for (a1, a2, b1, b2), t in blocks:
778 for (a1, a2, b1, b2), t in blocks:
784 # Changed blocks ('!') or blocks made only of blank lines ('~')
779 # Changed blocks ('!') or blocks made only of blank lines ('~')
785 # belong to the child.
780 # belong to the child.
786 if t == '=':
781 if t == '=':
787 child[0][b1:b2] = parent[0][a1:a2]
782 child[0][b1:b2] = parent[0][a1:a2]
788 return child
783 return child
789
784
790 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
785 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
791
786
792 def parents(f):
787 def parents(f):
793 pl = f.parents()
788 pl = f.parents()
794
789
795 # Don't return renamed parents if we aren't following.
790 # Don't return renamed parents if we aren't following.
796 if not follow:
791 if not follow:
797 pl = [p for p in pl if p.path() == f.path()]
792 pl = [p for p in pl if p.path() == f.path()]
798
793
799 # renamed filectx won't have a filelog yet, so set it
794 # renamed filectx won't have a filelog yet, so set it
800 # from the cache to save time
795 # from the cache to save time
801 for p in pl:
796 for p in pl:
802 if not '_filelog' in p.__dict__:
797 if not '_filelog' in p.__dict__:
803 p._filelog = getlog(p.path())
798 p._filelog = getlog(p.path())
804
799
805 return pl
800 return pl
806
801
807 # use linkrev to find the first changeset where self appeared
802 # use linkrev to find the first changeset where self appeared
808 if self.rev() != self.linkrev():
803 if self.rev() != self.linkrev():
809 base = self.filectx(self.filenode())
804 base = self.filectx(self.filenode())
810 else:
805 else:
811 base = self
806 base = self
812
807
813 # This algorithm would prefer to be recursive, but Python is a
808 # This algorithm would prefer to be recursive, but Python is a
814 # bit recursion-hostile. Instead we do an iterative
809 # bit recursion-hostile. Instead we do an iterative
815 # depth-first search.
810 # depth-first search.
816
811
817 visit = [base]
812 visit = [base]
818 hist = {}
813 hist = {}
819 pcache = {}
814 pcache = {}
820 needed = {base: 1}
815 needed = {base: 1}
821 while visit:
816 while visit:
822 f = visit[-1]
817 f = visit[-1]
823 pcached = f in pcache
818 pcached = f in pcache
824 if not pcached:
819 if not pcached:
825 pcache[f] = parents(f)
820 pcache[f] = parents(f)
826
821
827 ready = True
822 ready = True
828 pl = pcache[f]
823 pl = pcache[f]
829 for p in pl:
824 for p in pl:
830 if p not in hist:
825 if p not in hist:
831 ready = False
826 ready = False
832 visit.append(p)
827 visit.append(p)
833 if not pcached:
828 if not pcached:
834 needed[p] = needed.get(p, 0) + 1
829 needed[p] = needed.get(p, 0) + 1
835 if ready:
830 if ready:
836 visit.pop()
831 visit.pop()
837 reusable = f in hist
832 reusable = f in hist
838 if reusable:
833 if reusable:
839 curr = hist[f]
834 curr = hist[f]
840 else:
835 else:
841 curr = decorate(f.data(), f)
836 curr = decorate(f.data(), f)
842 for p in pl:
837 for p in pl:
843 if not reusable:
838 if not reusable:
844 curr = pair(hist[p], curr)
839 curr = pair(hist[p], curr)
845 if needed[p] == 1:
840 if needed[p] == 1:
846 del hist[p]
841 del hist[p]
847 del needed[p]
842 del needed[p]
848 else:
843 else:
849 needed[p] -= 1
844 needed[p] -= 1
850
845
851 hist[f] = curr
846 hist[f] = curr
852 pcache[f] = []
847 pcache[f] = []
853
848
854 return zip(hist[base][0], hist[base][1].splitlines(True))
849 return zip(hist[base][0], hist[base][1].splitlines(True))
855
850
856 def ancestors(self, followfirst=False):
851 def ancestors(self, followfirst=False):
857 visit = {}
852 visit = {}
858 c = self
853 c = self
859 cut = followfirst and 1 or None
854 cut = followfirst and 1 or None
860 while True:
855 while True:
861 for parent in c.parents()[:cut]:
856 for parent in c.parents()[:cut]:
862 visit[(parent.rev(), parent.node())] = parent
857 visit[(parent.rev(), parent.node())] = parent
863 if not visit:
858 if not visit:
864 break
859 break
865 c = visit.pop(max(visit))
860 c = visit.pop(max(visit))
866 yield c
861 yield c
867
862
868 class filectx(basefilectx):
863 class filectx(basefilectx):
869 """A filecontext object makes access to data related to a particular
864 """A filecontext object makes access to data related to a particular
870 filerevision convenient."""
865 filerevision convenient."""
871 def __init__(self, repo, path, changeid=None, fileid=None,
866 def __init__(self, repo, path, changeid=None, fileid=None,
872 filelog=None, changectx=None):
867 filelog=None, changectx=None):
873 """changeid can be a changeset revision, node, or tag.
868 """changeid can be a changeset revision, node, or tag.
874 fileid can be a file revision or node."""
869 fileid can be a file revision or node."""
875 self._repo = repo
870 self._repo = repo
876 self._path = path
871 self._path = path
877
872
878 assert (changeid is not None
873 assert (changeid is not None
879 or fileid is not None
874 or fileid is not None
880 or changectx is not None), \
875 or changectx is not None), \
881 ("bad args: changeid=%r, fileid=%r, changectx=%r"
876 ("bad args: changeid=%r, fileid=%r, changectx=%r"
882 % (changeid, fileid, changectx))
877 % (changeid, fileid, changectx))
883
878
884 if filelog is not None:
879 if filelog is not None:
885 self._filelog = filelog
880 self._filelog = filelog
886
881
887 if changeid is not None:
882 if changeid is not None:
888 self._changeid = changeid
883 self._changeid = changeid
889 if changectx is not None:
884 if changectx is not None:
890 self._changectx = changectx
885 self._changectx = changectx
891 if fileid is not None:
886 if fileid is not None:
892 self._fileid = fileid
887 self._fileid = fileid
893
888
894 @propertycache
889 @propertycache
895 def _changectx(self):
890 def _changectx(self):
896 try:
891 try:
897 return changectx(self._repo, self._changeid)
892 return changectx(self._repo, self._changeid)
898 except error.RepoLookupError:
893 except error.RepoLookupError:
899 # Linkrev may point to any revision in the repository. When the
894 # Linkrev may point to any revision in the repository. When the
900 # repository is filtered this may lead to `filectx` trying to build
895 # repository is filtered this may lead to `filectx` trying to build
901 # `changectx` for filtered revision. In such case we fallback to
896 # `changectx` for filtered revision. In such case we fallback to
902 # creating `changectx` on the unfiltered version of the reposition.
897 # creating `changectx` on the unfiltered version of the reposition.
903 # This fallback should not be an issue because `changectx` from
898 # This fallback should not be an issue because `changectx` from
904 # `filectx` are not used in complex operations that care about
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better than "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when fixing the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If the rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

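# Illustrative sketch, not part of the original module: how client code
# typically reaches the read-only file API above (data(), renamed(), ...).
# The helper name '_example_showrename' is introduced only for this example.
def _example_showrename(repo, rev, path):
    """Write rename information for 'path' at revision 'rev' (sketch)."""
    fctx = repo[rev][path]          # changectx -> filectx lookup
    data = fctx.data()              # raw file contents at that revision
    renamed = fctx.renamed()        # (source, filenode) or a false value
    if renamed:
        repo.ui.write("%s renamed from %s (%d bytes)\n"
                      % (path, renamed[0], len(data)))
    else:
        repo.ui.write("%s not renamed (%d bytes)\n" % (path, len(data)))
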
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but appends an extra
        letter when the file is modified. Modified files get an extra 'm'
        while added files get an extra 'a'. This is used by manifest merge
        to see that files are different and by the update logic to avoid
        deleting newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def unknown(self):
        return self._status.unknown
    def ignored(self):
        return self._status.ignored
    def clean(self):
        return self._status.clean
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = None
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        self._status = s
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

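# Illustrative sketch, not part of the original module: the working
# directory context is normally obtained as repo[None]; the accessors it
# inherits from committablectx then expose the dirstate-derived status.
# The helper name '_example_wdirsummary' is introduced only for this example.
def _example_wdirsummary(repo):
    """Write a one-line summary of the working directory (sketch)."""
    wctx = repo[None]               # a workingctx instance
    repo.ui.write("branch %s: %d modified, %d added, %d removed\n"
                  % (wctx.branch(), len(wctx.modified()),
                     len(wctx.added()), len(wctx.removed())))
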
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while related
    file data is made available through a callback mechanism. 'repo' is the
    current localrepo, 'parents' is a sequence of two parent revisions
    identifiers (pass None for every missing parent), 'text' is the commit
    message and 'files' lists names of files touched by the revision
    (normalized and relative to repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._status = scmutil.status(files, [], [], [], [], [], [])
        self._filectxfn = filectxfn
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f, fnode in man.iteritems():
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        return man

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
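
# Illustrative sketch, not part of the original module: committing a single
# file in memory with memctx/memfilectx. The file name, contents, commit
# message and the helper name '_example_memcommit' are placeholders.
def _example_memcommit(repo):
    """Create an in-memory commit adding one file (sketch)."""
    def filectxfn(repo, mctx, path):
        return memfilectx(repo, path, "hello in-memory commit\n",
                          islink=False, isexec=False, memctx=mctx)
    parent = repo['tip'].node()
    mctx = memctx(repo, (parent, None), "example: add hello.txt",
                  ["hello.txt"], filectxfn, user="example <ex@example.com>")
    return mctx.commit()            # returns the new changeset node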
@@ -1,79 +1,82 @@
from i18n import _
from mercurial import util

def tolist(val):
    """
    a convenience method to return an empty list instead of None
    """
    if val is None:
        return []
    else:
        return [val]

class namespaces(object):
    """
    provides an interface to register a generic many-to-many mapping between
    some (namespaced) names and nodes. The goal here is to control the
    pollution of jamming things into tags or bookmarks (in extension-land) and
    to simplify internal bits of mercurial: log output, tab completion, etc.

    More precisely, we define a list of names (the namespace) and a mapping of
    names to nodes. This name mapping returns a list of nodes.

    Furthermore, each name mapping will be passed a name to lookup which might
    not be in its domain. In this case, each method should return an empty list
    and not raise an error.

    We'll have a dictionary '_names' where each key is a namespace and
    its value is a dictionary of functions:
      'namemap': function that takes a name and returns a list of nodes
    """

    _names_version = 0

    def __init__(self):
        self._names = util.sortdict()

        addns = self.addnamespace

        # we need current mercurial named objects (bookmarks, tags, and
        # branches) to be initialized somewhere, so that place is here
        addns("bookmarks",
              lambda repo, name: tolist(repo._bookmarks.get(name)))

        addns("tags",
              lambda repo, name: tolist(repo._tagscache.tags.get(name)))

        addns("branches",
              lambda repo, name: tolist(repo.branchtip(name)))

    def addnamespace(self, namespace, namemap, order=None):
        """
        register a namespace

        namespace: the name to be registered (in plural form)
        namemap: function that takes a name and returns a list of nodes
        order: optional argument to specify the order of namespaces
               (e.g. 'branches' should be listed before 'bookmarks')
        """
        val = {'namemap': namemap}
        if order is not None:
            self._names.insert(order, namespace, val)
        else:
            self._names[namespace] = val

    def singlenode(self, repo, name):
        """
        Return the 'best' node for the given name. Best means the first node
        in the first nonempty list returned by a name-to-nodes mapping function
        in the defined precedence order.

        Raises a KeyError if there is no such node.
        """
        for ns, v in self._names.iteritems():
            n = v['namemap'](repo, name)
            if n:
                # return max revision number
                if len(n) > 1:
                    cl = repo.changelog
                    maxrev = max(cl.rev(node) for node in n)
                    return cl.node(maxrev)
                return n[0]
        raise KeyError(_('no such name: %s') % name)
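
# Illustrative sketch, not part of the original module: an extension could
# register its own namespace and resolve names through it. The 'favorites'
# namespace, its backing dict and the helper name are hypothetical, and the
# registry is assumed to be reachable as repo.names.
def _example_registerfavorites(repo, favorites):
    """Register a 'favorites' name-to-node mapping and resolve one (sketch).

    'favorites' is assumed to be a {name: node} dict kept by the caller.
    """
    repo.names.addnamespace("favorites",
                            lambda repo, name: tolist(favorites.get(name)))
    # singlenode() walks namespaces in registration order and returns a node
    # from the first nonempty match; unknown names raise KeyError.
    return repo.names.singlenode(repo, "myfavorite")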