context: add workingcommitctx for exact context to be committed...
FUJIWARA Katsunori
r23710:745e3b48 default
@@ -1,1796 +1,1808 b''
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, nullrev, short, hex, bin
from i18n import _
import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
import match as matchmod
import os, errno, stat
import obsolete as obsmod
import repoview
import fileset
import revlog

propertycache = util.propertycache

# Phony node value to stand in for new files in some uses of
# manifests. Manifests support 21-byte hashes for nodes which are
# dirty in the working copy.
_newnode = '!' * 21

def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing this
    file revision.

    :repo: a localrepository object (used to access changelog and manifest)
    :path: the file path
    :fnode: the nodeid of the file revision
    :filelog: the filelog of this path
    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    """
    cl = repo.unfiltered().changelog
    ma = repo.manifest
    # fetch the linkrev
    fr = filelog.rev(fnode)
    lkr = filelog.linkrev(fr)
    # check if this linkrev is an ancestor of srcrev
    anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
    if lkr not in anc:
        for a in anc:
            ac = cl.read(a) # get changeset data (we avoid object creation).
            if path in ac[3]: # checking the 'files' field.
                # The file has been touched, check if the content is similar
                # to the one we search for.
                if fnode == ma.readdelta(ac[0]).get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not a child of the one
        # it replaces) we could. Such a buggy situation will likely result in
        # a crash somewhere else at some point.
    return lkr

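# A minimal usage sketch (hypothetical, not part of this changeset),
# assuming `repo` is a localrepository with a tracked file 'a'; only the
# `fctx` variable below is invented:
#
#     fctx = repo['tip']['a']
#     rev = _adjustlinkrev(repo, 'a', fctx.filelog(), fctx.filenode(),
#                          repo['tip'].rev(), inclusive=True)
#     # `rev` is an ancestor of tip introducing this file revision, even
#     # when filelog.linkrev() points at an unrelated changeset that
#     # happens to reuse the same file revision ("linkrev shadowing").
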
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is None

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added, clean = [], [], []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        withflags = mf1.withflags() | mf2.withflags()
        for fn, mf2node in mf2.iteritems():
            if fn in mf1:
                if (fn not in deletedset and
                    ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                     (mf1[fn] != mf2node and
                      (mf2node != _newnode or self[fn].cmp(other[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            elif fn not in deletedset:
                added.append(fn)
        removed = mf1.keys()
        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """True if the changeset is a successor of a changeset with multiple
        possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changeset.

        Troubles are returned as strings. Possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r


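# A minimal usage sketch (hypothetical, not part of this changeset),
# assuming `repo` is an open localrepository; status() above is what
# powers 'hg status'-style queries from Python:
#
#     st = repo['.'].status()                    # working dir vs. '.'
#     st.modified, st.added, st.removed          # sorted lists of paths
#
#     st = repo['tip'].status(repo['tip'].p1())  # between two changesets
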
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx

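# A minimal usage sketch (hypothetical, not part of this changeset):
# committing an in-memory change without touching the working directory.
# The file name 'a', the content and the message are illustrative:
#
#     def getfilectx(repo, memctx, path):
#         return memfilectx(repo, path, 'new content\n', memctx=memctx)
#
#     p1 = repo['.'].node()
#     ctx = memctx(repo, (p1, None), 'example commit', ['a'],
#                  getfilectx, user='someone', date=None)
#     node = repo.commitctx(ctx)
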
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == '.':
                self._node = repo.dirstate.p1()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername == 'visible':
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

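    # A minimal usage sketch (hypothetical, not part of this changeset):
    # the lookup order in __init__ means all of the following resolve
    # through changectx, assuming `repo` is a localrepository:
    #
    #     repo[0]               # local revision number
    #     repo['.']             # working directory parent
    #     repo['null']          # the null revision
    #     repo['tip']           # repository tip
    #     repo['c3a4f2a1b2d3']  # full or unambiguous partial hex node
    #     repo['mybookmark']    # bookmark/tag/etc. via repo.names
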
    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        return self._changeset
    def manifestnode(self):
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

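    # A minimal usage sketch (hypothetical revisions, not part of this
    # changeset): picking the merge base of two heads, and steering the
    # choice when several greatest common ancestors exist:
    #
    #     base = repo['somehead'].ancestor(repo['otherhead'], warn=True)
    #
    #     # from the command line, an ambiguous ancestor can be pinned with:
    #     #   hg merge --config merge.preferancestor=<rev>
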
    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

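    # A minimal usage sketch (hypothetical path 'a', not part of this
    # changeset): cmp() is a cheap "are these different?" test, the same
    # comparison _buildstatus makes for dirty working-copy files:
    #
    #     committed = repo['.']['a']
    #     working = repo[None]['a']
    #     if committed.cmp(working):
    #         print 'a differs from its committed revision'
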
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return _adjustlinkrev(self._repo, self._path, self._filelog,
                              self._filenode, self.rev(), inclusive=True)

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid has always been filtered out in the previous list
            # comprehension, inserting at 0 will always result in replacing
            # the first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        ret = []
        for path, fnode, l in pl:
            if '_changeid' in vars(self) or '_changectx' in vars(self):
                # If self is associated with a changeset (probably explicitly
                # fed), ensure the created filectx is associated with a
                # changeset that is an ancestor of self.changectx.
                rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
                               changeid=rev)
            else:
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
            ret.append(fctx)
        return ret

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost, this returns a fixed value (False)
        as linenumber, if "linenumber" parameter is "False".'''

        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = filectx(self._repo, self._path, filelog=self.filelog(),
                           fileid=self.filenode(), changeid=introrev)

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

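    # A minimal usage sketch (hypothetical file 'a', not part of this
    # changeset): blame-style output built on annotate(), printing the
    # introducing revision for each line:
    #
    #     fctx = repo['tip']['a']
    #     for linefctx, line in fctx.annotate(follow=True):
    #         print '%s: %s' % (linefctx.rev(), line),
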
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        cut = followfirst and 1 or None
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.rev(), parent.node())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when the linkrev issues are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

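    # Illustrative hgrc sketch (the section and key come straight from the
    # ui.config lookup above): making censored file revisions read back as
    # empty strings instead of aborting.
    #
    #   [censor]
    #   policy = ignore
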
    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check whether the file was actually renamed in this changeset
        revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

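# A minimal usage sketch (illustrative only, not part of this module):
# assuming 'repo' is an existing localrepository and 'foo' a tracked path,
# a filectx is usually reached through a changectx lookup.
#
#   fctx = repo['tip']['foo']    # filectx for 'foo' at the tip changeset
#   contents = fctx.data()       # raw file data at that revision
#   older = fctx.filectx(0)      # first file revision, reusing the filelog
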
class committablectx(basectx):
    """A committablectx object provides common functionality for a context
    that wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

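    # The three-way flag resolution above follows the usual merge rule:
    # if both parents agree, that flag wins; if one side is unchanged from
    # the ancestor, the other side's change wins; a genuine three-way
    # conflict falls back to '' (no flag).
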
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parents, but appends an extra
        letter when a file is dirty: modified files get an extra 'm' while
        added files get an extra 'a'. This is used by manifest merge to see
        that files are different and by the update logic to avoid deleting
        newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

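    # Note: appending 'a'/'m' to a 20-byte parent nodeid yields the 21-byte
    # dirty-node form that manifests accept for working-copy entries, so any
    # comparison against a stored 20-byte node reliably reports the file as
    # different.
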
    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.
        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

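    # Usage sketch (illustrative): the working context is usually obtained
    # as repo[None]; asking it whether anything would block a clean update:
    #
    #   wctx = repo[None]
    #   if wctx.dirty(missing=True):
    #       ...  # uncommitted (or deleted-but-tracked) changes exist
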
    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

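    # Sketch of the return contract (illustrative): add() warns about, rather
    # than raises on, per-file failures.
    #
    #   rejected = repo[None].add(['newfile', 'missingfile'])
    #   # 'rejected' lists the paths that could not be scheduled for add
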
    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its
        parent, which means this function is comparing with a non-parent;
        therefore we need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent: a new manifest
        is built only when self (the working directory) is compared against
        a non-parent (anything other than repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the
        # file they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        elif match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in the
        workingctx case, since it belongs only to the working directory
        when comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then
        we just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with another file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

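# Note on the super() call above: super(workingctx, self) starts the MRO
# lookup after workingctx, so committablectx.__init__ runs directly. Passing
# 'changes' through seeds the cached _status, so status-derived data (files,
# manifest) reflects only the changes being committed rather than the whole
# working directory.
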
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order
    is undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing
    # files. Extensions that need to retain compatibility across Mercurial
    # 3.1 can use this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one
                # parent (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

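    # A minimal in-memory commit sketch (illustrative only; assumes 'repo'
    # is an existing localrepository and that writing 'foo' is desired):
    #
    #   def getfctx(repo, mctx, path):
    #       return memfilectx(repo, path, 'new contents\n', memctx=mctx)
    #
    #   mctx = memctx(repo, (repo['.'].node(), None), 'example message',
    #                 ['foo'], getfctx, user='someone@example.com')
    #   newnode = mctx.commit()   # hands the context to localrepo.commitctx
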
    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized from a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,1839 +1,1840 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
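def _unfilteredmethod_example():
    # A minimal sketch, not part of the original module: any call made
    # through a filtered view is rerouted to the unfiltered repo before
    # the wrapped function runs.  The fakerepo class is hypothetical.
    class fakerepo(object):
        def unfiltered(self):
            return self
        @unfilteredmethod
        def touchstore(self):
            return self # always the unfiltered instance
    return fakerepo().touchstore()
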
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

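def _localpeer_example(repo):
    # A minimal sketch, not part of the original module: a localpeer
    # wraps the 'served' view of repo behind the peer API, so callers can
    # treat local and remote repositories uniformly.  The helper name is
    # hypothetical.
    peer = localpeer(repo)
    tipnode = peer.lookup('tip') # resolve a symbolic name to a node
    return peer.known([tipnode]) # -> [True] when tip is served
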
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data are found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

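    def _reopen_example(self):
        # A minimal sketch, not part of the original class: instances are
        # normally obtained through mercurial.hg.repository(), which
        # dispatches on the path scheme, rather than constructed directly.
        from mercurial import hg
        return hg.repository(self.ui, self.root)
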
    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.sopener.options['maxchainlen'] = maxchainlen

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

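    def _filtered_example(self):
        # A minimal sketch, not part of the original class: 'visible'
        # hides obsolete changesets and 'served' additionally hides secret
        # ones; the dynamically built proxy class keeps isinstance()
        # checks against the repo class working.
        visible = self.filtered('visible')
        served = self.filtered('served')
        return len(visible), len(served)
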
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.sopener, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

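    def _revset_example(self):
        # A minimal sketch, not part of the original class: revs() yields
        # revision numbers and set() changectx objects; %s and %ld are
        # expanded by revset.formatspec before matching.
        revs = self.revs('ancestors(%s) and not public()', 'tip')
        return [ctx.hex() for ctx in self.set('limit(%ld, 5)', revs)]
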
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

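    def _tag_example(self):
        # A minimal sketch, not part of the original class: a local tag
        # only writes .hg/localtags, while a global tag commits a new
        # changeset updating .hgtags through _tag() above.  The tag name
        # and user are hypothetical.
        node = self['tip'].node()
        self.tag('nightly', node, 'Added tag nightly', True,
                 'example <user@example.com>', None)
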
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

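    def _tags_example(self):
        # A minimal sketch, not part of the original class: tags() maps
        # tag name to node; 'tip' is always present and tags pointing at
        # unknown nodes have already been dropped.
        for name, node in sorted(self.tags().iteritems()):
            self.ui.write('%s -> %s\n' % (name, hex(node)))
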
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

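    def _branch_example(self):
        # A minimal sketch, not part of the original class: the branchmap
        # is cache-backed, so repeated lookups stay cheap, and unknown
        # branch names raise RepoLookupError via branchtip().
        heads = self.branchmap()['default'] # heads, ascending by revision
        return self.branchtip('default'), heads
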
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return os.path.join(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

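    def _wwrite_example(self):
        # A minimal sketch, not part of the original class: write the
        # files touched by tip into the working directory, honoring the
        # 'l' (symlink) and 'x' (executable) flags the way update code
        # does.
        ctx = self['tip']
        for f in ctx.files():
            if f in ctx:
                self.wwrite(f, ctx[f].data(), ctx[f].flags())
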
879 def wwritedata(self, filename, data):
879 def wwritedata(self, filename, data):
880 return self._filter(self._decodefilterpats, filename, data)
880 return self._filter(self._decodefilterpats, filename, data)
881
881
882 def currenttransaction(self):
882 def currenttransaction(self):
883 """return the current transaction or None if non exists"""
883 """return the current transaction or None if non exists"""
884 tr = self._transref and self._transref() or None
884 tr = self._transref and self._transref() or None
885 if tr and tr.running():
885 if tr and tr.running():
886 return tr
886 return tr
887 return None
887 return None
888
888
889 def transaction(self, desc, report=None):
889 def transaction(self, desc, report=None):
890 tr = self.currenttransaction()
890 tr = self.currenttransaction()
891 if tr is not None:
891 if tr is not None:
892 return tr.nest()
892 return tr.nest()
893
893
894 # abort here if the journal already exists
894 # abort here if the journal already exists
895 if self.svfs.exists("journal"):
895 if self.svfs.exists("journal"):
896 raise error.RepoError(
896 raise error.RepoError(
897 _("abandoned transaction found"),
897 _("abandoned transaction found"),
898 hint=_("run 'hg recover' to clean up transaction"))
898 hint=_("run 'hg recover' to clean up transaction"))
899
899
900 self._writejournal(desc)
900 self._writejournal(desc)
901 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
901 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
902 rp = report and report or self.ui.warn
902 rp = report and report or self.ui.warn
903 vfsmap = {'plain': self.opener} # root of .hg/
903 vfsmap = {'plain': self.opener} # root of .hg/
904 tr = transaction.transaction(rp, self.sopener, vfsmap,
904 tr = transaction.transaction(rp, self.sopener, vfsmap,
905 "journal",
905 "journal",
906 aftertrans(renames),
906 aftertrans(renames),
907 self.store.createmode)
907 self.store.createmode)
908 # note: writing the fncache only during finalize means that the file is
908 # note: writing the fncache only during finalize means that the file is
909 # outdated when running hooks. As fncache is used for streaming clone,
909 # outdated when running hooks. As fncache is used for streaming clone,
910 # this is not expected to break anything that happens during the hooks.
910 # this is not expected to break anything that happens during the hooks.
911 tr.addfinalize('flush-fncache', self.store.write)
911 tr.addfinalize('flush-fncache', self.store.write)
912 self._transref = weakref.ref(tr)
912 self._transref = weakref.ref(tr)
913 return tr
913 return tr
914
914
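# The nesting contract of transaction() as a standalone toy (ToyTransaction
# is illustrative, not the real transaction class): a second call while a
# transaction is running returns a nested handle, and only the outermost
# close actually commits.
class ToyTransaction(object):
    def __init__(self):
        self._count = 1
        self.committed = False

    def nest(self):
        self._count += 1
        return self

    def close(self):
        self._count -= 1
        if self._count == 0:
            self.committed = True

tr = ToyTransaction()
inner = tr.nest()       # what a nested transaction() call would hand back
inner.close()           # inner close: the outer scope is still open
assert not tr.committed
tr.close()              # outermost close commits
assert tr.committed
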
915 def _journalfiles(self):
915 def _journalfiles(self):
916 return ((self.svfs, 'journal'),
916 return ((self.svfs, 'journal'),
917 (self.vfs, 'journal.dirstate'),
917 (self.vfs, 'journal.dirstate'),
918 (self.vfs, 'journal.branch'),
918 (self.vfs, 'journal.branch'),
919 (self.vfs, 'journal.desc'),
919 (self.vfs, 'journal.desc'),
920 (self.vfs, 'journal.bookmarks'),
920 (self.vfs, 'journal.bookmarks'),
921 (self.svfs, 'journal.phaseroots'))
921 (self.svfs, 'journal.phaseroots'))
922
922
923 def undofiles(self):
923 def undofiles(self):
924 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
924 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
925
925
926 def _writejournal(self, desc):
926 def _writejournal(self, desc):
927 self.opener.write("journal.dirstate",
927 self.opener.write("journal.dirstate",
928 self.opener.tryread("dirstate"))
928 self.opener.tryread("dirstate"))
929 self.opener.write("journal.branch",
929 self.opener.write("journal.branch",
930 encoding.fromlocal(self.dirstate.branch()))
930 encoding.fromlocal(self.dirstate.branch()))
931 self.opener.write("journal.desc",
931 self.opener.write("journal.desc",
932 "%d\n%s\n" % (len(self), desc))
932 "%d\n%s\n" % (len(self), desc))
933 self.opener.write("journal.bookmarks",
933 self.opener.write("journal.bookmarks",
934 self.opener.tryread("bookmarks"))
934 self.opener.tryread("bookmarks"))
935 self.sopener.write("journal.phaseroots",
935 self.sopener.write("journal.phaseroots",
936 self.sopener.tryread("phaseroots"))
936 self.sopener.tryread("phaseroots"))
937
937
938 def recover(self):
938 def recover(self):
939 lock = self.lock()
939 lock = self.lock()
940 try:
940 try:
941 if self.svfs.exists("journal"):
941 if self.svfs.exists("journal"):
942 self.ui.status(_("rolling back interrupted transaction\n"))
942 self.ui.status(_("rolling back interrupted transaction\n"))
943 vfsmap = {'': self.sopener,
943 vfsmap = {'': self.sopener,
944 'plain': self.opener,}
944 'plain': self.opener,}
945 transaction.rollback(self.sopener, vfsmap, "journal",
945 transaction.rollback(self.sopener, vfsmap, "journal",
946 self.ui.warn)
946 self.ui.warn)
947 self.invalidate()
947 self.invalidate()
948 return True
948 return True
949 else:
949 else:
950 self.ui.warn(_("no interrupted transaction available\n"))
950 self.ui.warn(_("no interrupted transaction available\n"))
951 return False
951 return False
952 finally:
952 finally:
953 lock.release()
953 lock.release()
954
954
955 def rollback(self, dryrun=False, force=False):
955 def rollback(self, dryrun=False, force=False):
956 wlock = lock = None
956 wlock = lock = None
957 try:
957 try:
958 wlock = self.wlock()
958 wlock = self.wlock()
959 lock = self.lock()
959 lock = self.lock()
960 if self.svfs.exists("undo"):
960 if self.svfs.exists("undo"):
961 return self._rollback(dryrun, force)
961 return self._rollback(dryrun, force)
962 else:
962 else:
963 self.ui.warn(_("no rollback information available\n"))
963 self.ui.warn(_("no rollback information available\n"))
964 return 1
964 return 1
965 finally:
965 finally:
966 release(lock, wlock)
966 release(lock, wlock)
967
967
968 @unfilteredmethod # Until we get smarter cache management
968 @unfilteredmethod # Until we get smarter cache management
969 def _rollback(self, dryrun, force):
969 def _rollback(self, dryrun, force):
970 ui = self.ui
970 ui = self.ui
971 try:
971 try:
972 args = self.opener.read('undo.desc').splitlines()
972 args = self.opener.read('undo.desc').splitlines()
973 (oldlen, desc, detail) = (int(args[0]), args[1], None)
973 (oldlen, desc, detail) = (int(args[0]), args[1], None)
974 if len(args) >= 3:
974 if len(args) >= 3:
975 detail = args[2]
975 detail = args[2]
976 oldtip = oldlen - 1
976 oldtip = oldlen - 1
977
977
978 if detail and ui.verbose:
978 if detail and ui.verbose:
979 msg = (_('repository tip rolled back to revision %s'
979 msg = (_('repository tip rolled back to revision %s'
980 ' (undo %s: %s)\n')
980 ' (undo %s: %s)\n')
981 % (oldtip, desc, detail))
981 % (oldtip, desc, detail))
982 else:
982 else:
983 msg = (_('repository tip rolled back to revision %s'
983 msg = (_('repository tip rolled back to revision %s'
984 ' (undo %s)\n')
984 ' (undo %s)\n')
985 % (oldtip, desc))
985 % (oldtip, desc))
986 except IOError:
986 except IOError:
987 msg = _('rolling back unknown transaction\n')
987 msg = _('rolling back unknown transaction\n')
988 desc = None
988 desc = None
989
989
990 if not force and self['.'] != self['tip'] and desc == 'commit':
990 if not force and self['.'] != self['tip'] and desc == 'commit':
991 raise util.Abort(
991 raise util.Abort(
992 _('rollback of last commit while not checked out '
992 _('rollback of last commit while not checked out '
993 'may lose data'), hint=_('use -f to force'))
993 'may lose data'), hint=_('use -f to force'))
994
994
995 ui.status(msg)
995 ui.status(msg)
996 if dryrun:
996 if dryrun:
997 return 0
997 return 0
998
998
999 parents = self.dirstate.parents()
999 parents = self.dirstate.parents()
1000 self.destroying()
1000 self.destroying()
1001 vfsmap = {'plain': self.opener}
1001 vfsmap = {'plain': self.opener}
1002 transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
1002 transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
1003 if self.vfs.exists('undo.bookmarks'):
1003 if self.vfs.exists('undo.bookmarks'):
1004 self.vfs.rename('undo.bookmarks', 'bookmarks')
1004 self.vfs.rename('undo.bookmarks', 'bookmarks')
1005 if self.svfs.exists('undo.phaseroots'):
1005 if self.svfs.exists('undo.phaseroots'):
1006 self.svfs.rename('undo.phaseroots', 'phaseroots')
1006 self.svfs.rename('undo.phaseroots', 'phaseroots')
1007 self.invalidate()
1007 self.invalidate()
1008
1008
1009 parentgone = (parents[0] not in self.changelog.nodemap or
1009 parentgone = (parents[0] not in self.changelog.nodemap or
1010 parents[1] not in self.changelog.nodemap)
1010 parents[1] not in self.changelog.nodemap)
1011 if parentgone:
1011 if parentgone:
1012 self.vfs.rename('undo.dirstate', 'dirstate')
1012 self.vfs.rename('undo.dirstate', 'dirstate')
1013 try:
1013 try:
1014 branch = self.opener.read('undo.branch')
1014 branch = self.opener.read('undo.branch')
1015 self.dirstate.setbranch(encoding.tolocal(branch))
1015 self.dirstate.setbranch(encoding.tolocal(branch))
1016 except IOError:
1016 except IOError:
1017 ui.warn(_('named branch could not be reset: '
1017 ui.warn(_('named branch could not be reset: '
1018 'current branch is still \'%s\'\n')
1018 'current branch is still \'%s\'\n')
1019 % self.dirstate.branch())
1019 % self.dirstate.branch())
1020
1020
1021 self.dirstate.invalidate()
1021 self.dirstate.invalidate()
1022 parents = tuple([p.rev() for p in self.parents()])
1022 parents = tuple([p.rev() for p in self.parents()])
1023 if len(parents) > 1:
1023 if len(parents) > 1:
1024 ui.status(_('working directory now based on '
1024 ui.status(_('working directory now based on '
1025 'revisions %d and %d\n') % parents)
1025 'revisions %d and %d\n') % parents)
1026 else:
1026 else:
1027 ui.status(_('working directory now based on '
1027 ui.status(_('working directory now based on '
1028 'revision %d\n') % parents)
1028 'revision %d\n') % parents)
1029 # TODO: if we know which new heads may result from this rollback, pass
1029 # TODO: if we know which new heads may result from this rollback, pass
1030 # them to destroy(), which will prevent the branchhead cache from being
1030 # them to destroy(), which will prevent the branchhead cache from being
1031 # invalidated.
1031 # invalidated.
1032 self.destroyed()
1032 self.destroyed()
1033 return 0
1033 return 0
1034
1034
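# A sketch of the round trip between _writejournal, which writes journal.desc
# as "%d\n%s\n" % (len(self), desc), and the splitlines() parsing done in
# _rollback above. parse_undo_desc is an illustrative helper.
def parse_undo_desc(text):
    args = text.splitlines()
    oldlen = int(args[0])                         # changelog length before the tx
    desc = args[1]                                # e.g. 'commit', 'pull', ...
    detail = args[2] if len(args) >= 3 else None  # optional third line
    return oldlen, desc, detail

assert parse_undo_desc('42\ncommit\n') == (42, 'commit', None)
assert parse_undo_desc('42\npull\nhttp://example.com/repo\n') == \
    (42, 'pull', 'http://example.com/repo')
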
1035 def invalidatecaches(self):
1035 def invalidatecaches(self):
1036
1036
1037 if '_tagscache' in vars(self):
1037 if '_tagscache' in vars(self):
1038 # can't use delattr on proxy
1038 # can't use delattr on proxy
1039 del self.__dict__['_tagscache']
1039 del self.__dict__['_tagscache']
1040
1040
1041 self.unfiltered()._branchcaches.clear()
1041 self.unfiltered()._branchcaches.clear()
1042 self.invalidatevolatilesets()
1042 self.invalidatevolatilesets()
1043
1043
1044 def invalidatevolatilesets(self):
1044 def invalidatevolatilesets(self):
1045 self.filteredrevcache.clear()
1045 self.filteredrevcache.clear()
1046 obsolete.clearobscaches(self)
1046 obsolete.clearobscaches(self)
1047
1047
1048 def invalidatedirstate(self):
1048 def invalidatedirstate(self):
1049 '''Invalidates the dirstate, causing the next call to dirstate
1049 '''Invalidates the dirstate, causing the next call to dirstate
1050 to check if it was modified since the last time it was read,
1050 to check if it was modified since the last time it was read,
1051 rereading it if it has.
1051 rereading it if it has.
1052
1052
1053 This is different from dirstate.invalidate() in that it doesn't
1053 This is different from dirstate.invalidate() in that it doesn't
1054 always reread the dirstate. Use dirstate.invalidate() if you want to
1054 always reread the dirstate. Use dirstate.invalidate() if you want to
1055 explicitly read the dirstate again (i.e. restoring it to a previous
1055 explicitly read the dirstate again (i.e. restoring it to a previous
1056 known good state).'''
1056 known good state).'''
1057 if hasunfilteredcache(self, 'dirstate'):
1057 if hasunfilteredcache(self, 'dirstate'):
1058 for k in self.dirstate._filecache:
1058 for k in self.dirstate._filecache:
1059 try:
1059 try:
1060 delattr(self.dirstate, k)
1060 delattr(self.dirstate, k)
1061 except AttributeError:
1061 except AttributeError:
1062 pass
1062 pass
1063 delattr(self.unfiltered(), 'dirstate')
1063 delattr(self.unfiltered(), 'dirstate')
1064
1064
1065 def invalidate(self):
1065 def invalidate(self):
1066 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1066 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1067 for k in self._filecache:
1067 for k in self._filecache:
1068 # dirstate is invalidated separately in invalidatedirstate()
1068 # dirstate is invalidated separately in invalidatedirstate()
1069 if k == 'dirstate':
1069 if k == 'dirstate':
1070 continue
1070 continue
1071
1071
1072 try:
1072 try:
1073 delattr(unfiltered, k)
1073 delattr(unfiltered, k)
1074 except AttributeError:
1074 except AttributeError:
1075 pass
1075 pass
1076 self.invalidatecaches()
1076 self.invalidatecaches()
1077 self.store.invalidatecaches()
1077 self.store.invalidatecaches()
1078
1078
1079 def invalidateall(self):
1079 def invalidateall(self):
1080 '''Fully invalidates both store and non-store parts, causing the
1080 '''Fully invalidates both store and non-store parts, causing the
1081 subsequent operation to reread any outside changes.'''
1081 subsequent operation to reread any outside changes.'''
1082 # extension should hook this to invalidate its caches
1082 # extension should hook this to invalidate its caches
1083 self.invalidate()
1083 self.invalidate()
1084 self.invalidatedirstate()
1084 self.invalidatedirstate()
1085
1085
1086 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1086 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1087 try:
1087 try:
1088 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1088 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1089 except error.LockHeld, inst:
1089 except error.LockHeld, inst:
1090 if not wait:
1090 if not wait:
1091 raise
1091 raise
1092 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1092 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1093 (desc, inst.locker))
1093 (desc, inst.locker))
1094 # default to 600 seconds timeout
1094 # default to 600 seconds timeout
1095 l = lockmod.lock(vfs, lockname,
1095 l = lockmod.lock(vfs, lockname,
1096 int(self.ui.config("ui", "timeout", "600")),
1096 int(self.ui.config("ui", "timeout", "600")),
1097 releasefn, desc=desc)
1097 releasefn, desc=desc)
1098 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1098 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1099 if acquirefn:
1099 if acquirefn:
1100 acquirefn()
1100 acquirefn()
1101 return l
1101 return l
1102
1102
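# The acquisition pattern of _lock in miniature: try a non-blocking acquire
# first, and fall back to a blocking wait only when the caller asked to wait
# (the real code bounds that wait by ui.timeout, 600 seconds by default, and
# warns about the current lock holder). fcntl.flock stands in for Mercurial's
# lock module; acquire() is an illustrative name.
import fcntl

def acquire(fp, wait=True):
    try:
        fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)  # like lock(..., 0, ...)
    except IOError:
        if not wait:
            raise                        # non-blocking attempt only
        fcntl.flock(fp, fcntl.LOCK_EX)   # blocking retry (unbounded here)
    return fp
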
1103 def _afterlock(self, callback):
1103 def _afterlock(self, callback):
1104 """add a callback to the current repository lock.
1104 """add a callback to the current repository lock.
1105
1105
1106 The callback will be executed on lock release."""
1106 The callback will be executed on lock release."""
1107 l = self._lockref and self._lockref()
1107 l = self._lockref and self._lockref()
1108 if l:
1108 if l:
1109 l.postrelease.append(callback)
1109 l.postrelease.append(callback)
1110 else:
1110 else:
1111 callback()
1111 callback()
1112
1112
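# The _afterlock contract as a standalone toy: while a lock is held, the
# callback is deferred to its release; with no lock, it runs at once.
# ToyLock and afterlock are illustrative; the real code reaches the lock
# through a weakref.
class ToyLock(object):
    def __init__(self):
        self.postrelease = []

    def release(self):
        for cb in self.postrelease:
            cb()

def afterlock(lock, callback):
    if lock is not None:
        lock.postrelease.append(callback)
    else:
        callback()

events = []
l = ToyLock()
afterlock(l, lambda: events.append('ran'))
assert events == []          # deferred: the lock is still held
l.release()
assert events == ['ran']     # executed on release
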
1113 def lock(self, wait=True):
1113 def lock(self, wait=True):
1114 '''Lock the repository store (.hg/store) and return a weak reference
1114 '''Lock the repository store (.hg/store) and return a weak reference
1115 to the lock. Use this before modifying the store (e.g. committing or
1115 to the lock. Use this before modifying the store (e.g. committing or
1116 stripping). If you are opening a transaction, get a lock as well.'''
1116 stripping). If you are opening a transaction, get a lock as well.'''
1117 l = self._lockref and self._lockref()
1117 l = self._lockref and self._lockref()
1118 if l is not None and l.held:
1118 if l is not None and l.held:
1119 l.lock()
1119 l.lock()
1120 return l
1120 return l
1121
1121
1122 def unlock():
1122 def unlock():
1123 for k, ce in self._filecache.items():
1123 for k, ce in self._filecache.items():
1124 if k == 'dirstate' or k not in self.__dict__:
1124 if k == 'dirstate' or k not in self.__dict__:
1125 continue
1125 continue
1126 ce.refresh()
1126 ce.refresh()
1127
1127
1128 l = self._lock(self.svfs, "lock", wait, unlock,
1128 l = self._lock(self.svfs, "lock", wait, unlock,
1129 self.invalidate, _('repository %s') % self.origroot)
1129 self.invalidate, _('repository %s') % self.origroot)
1130 self._lockref = weakref.ref(l)
1130 self._lockref = weakref.ref(l)
1131 return l
1131 return l
1132
1132
1133 def wlock(self, wait=True):
1133 def wlock(self, wait=True):
1134 '''Lock the non-store parts of the repository (everything under
1134 '''Lock the non-store parts of the repository (everything under
1135 .hg except .hg/store) and return a weak reference to the lock.
1135 .hg except .hg/store) and return a weak reference to the lock.
1136 Use this before modifying files in .hg.'''
1136 Use this before modifying files in .hg.'''
1137 l = self._wlockref and self._wlockref()
1137 l = self._wlockref and self._wlockref()
1138 if l is not None and l.held:
1138 if l is not None and l.held:
1139 l.lock()
1139 l.lock()
1140 return l
1140 return l
1141
1141
1142 def unlock():
1142 def unlock():
1143 if self.dirstate.pendingparentchange():
1143 if self.dirstate.pendingparentchange():
1144 self.dirstate.invalidate()
1144 self.dirstate.invalidate()
1145 else:
1145 else:
1146 self.dirstate.write()
1146 self.dirstate.write()
1147
1147
1148 self._filecache['dirstate'].refresh()
1148 self._filecache['dirstate'].refresh()
1149
1149
1150 l = self._lock(self.vfs, "wlock", wait, unlock,
1150 l = self._lock(self.vfs, "wlock", wait, unlock,
1151 self.invalidatedirstate, _('working directory of %s') %
1151 self.invalidatedirstate, _('working directory of %s') %
1152 self.origroot)
1152 self.origroot)
1153 self._wlockref = weakref.ref(l)
1153 self._wlockref = weakref.ref(l)
1154 return l
1154 return l
1155
1155
1156 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1156 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1157 """
1157 """
1158 commit an individual file as part of a larger transaction
1158 commit an individual file as part of a larger transaction
1159 """
1159 """
1160
1160
1161 fname = fctx.path()
1161 fname = fctx.path()
1162 text = fctx.data()
1162 text = fctx.data()
1163 flog = self.file(fname)
1163 flog = self.file(fname)
1164 fparent1 = manifest1.get(fname, nullid)
1164 fparent1 = manifest1.get(fname, nullid)
1165 fparent2 = manifest2.get(fname, nullid)
1165 fparent2 = manifest2.get(fname, nullid)
1166
1166
1167 meta = {}
1167 meta = {}
1168 copy = fctx.renamed()
1168 copy = fctx.renamed()
1169 if copy and copy[0] != fname:
1169 if copy and copy[0] != fname:
1170 # Mark the new revision of this file as a copy of another
1170 # Mark the new revision of this file as a copy of another
1171 # file. This copy data will effectively act as a parent
1171 # file. This copy data will effectively act as a parent
1172 # of this new revision. If this is a merge, the first
1172 # of this new revision. If this is a merge, the first
1173 # parent will be the nullid (meaning "look up the copy data")
1173 # parent will be the nullid (meaning "look up the copy data")
1174 # and the second one will be the other parent. For example:
1174 # and the second one will be the other parent. For example:
1175 #
1175 #
1176 # 0 --- 1 --- 3 rev1 changes file foo
1176 # 0 --- 1 --- 3 rev1 changes file foo
1177 # \ / rev2 renames foo to bar and changes it
1177 # \ / rev2 renames foo to bar and changes it
1178 # \- 2 -/ rev3 should have bar with all changes and
1178 # \- 2 -/ rev3 should have bar with all changes and
1179 # should record that bar descends from
1179 # should record that bar descends from
1180 # bar in rev2 and foo in rev1
1180 # bar in rev2 and foo in rev1
1181 #
1181 #
1182 # this allows this merge to succeed:
1182 # this allows this merge to succeed:
1183 #
1183 #
1184 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1184 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1185 # \ / merging rev3 and rev4 should use bar@rev2
1185 # \ / merging rev3 and rev4 should use bar@rev2
1186 # \- 2 --- 4 as the merge base
1186 # \- 2 --- 4 as the merge base
1187 #
1187 #
1188
1188
1189 cfname = copy[0]
1189 cfname = copy[0]
1190 crev = manifest1.get(cfname)
1190 crev = manifest1.get(cfname)
1191 newfparent = fparent2
1191 newfparent = fparent2
1192
1192
1193 if manifest2: # branch merge
1193 if manifest2: # branch merge
1194 if fparent2 == nullid or crev is None: # copied on remote side
1194 if fparent2 == nullid or crev is None: # copied on remote side
1195 if cfname in manifest2:
1195 if cfname in manifest2:
1196 crev = manifest2[cfname]
1196 crev = manifest2[cfname]
1197 newfparent = fparent1
1197 newfparent = fparent1
1198
1198
1199 # find source in nearest ancestor if we've lost track
1199 # find source in nearest ancestor if we've lost track
1200 if not crev:
1200 if not crev:
1201 self.ui.debug(" %s: searching for copy revision for %s\n" %
1201 self.ui.debug(" %s: searching for copy revision for %s\n" %
1202 (fname, cfname))
1202 (fname, cfname))
1203 for ancestor in self[None].ancestors():
1203 for ancestor in self[None].ancestors():
1204 if cfname in ancestor:
1204 if cfname in ancestor:
1205 crev = ancestor[cfname].filenode()
1205 crev = ancestor[cfname].filenode()
1206 break
1206 break
1207
1207
1208 if crev:
1208 if crev:
1209 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1209 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1210 meta["copy"] = cfname
1210 meta["copy"] = cfname
1211 meta["copyrev"] = hex(crev)
1211 meta["copyrev"] = hex(crev)
1212 fparent1, fparent2 = nullid, newfparent
1212 fparent1, fparent2 = nullid, newfparent
1213 else:
1213 else:
1214 self.ui.warn(_("warning: can't find ancestor for '%s' "
1214 self.ui.warn(_("warning: can't find ancestor for '%s' "
1215 "copied from '%s'!\n") % (fname, cfname))
1215 "copied from '%s'!\n") % (fname, cfname))
1216
1216
1217 elif fparent1 == nullid:
1217 elif fparent1 == nullid:
1218 fparent1, fparent2 = fparent2, nullid
1218 fparent1, fparent2 = fparent2, nullid
1219 elif fparent2 != nullid:
1219 elif fparent2 != nullid:
1220 # is one parent an ancestor of the other?
1220 # is one parent an ancestor of the other?
1221 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1221 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1222 if fparent1 in fparentancestors:
1222 if fparent1 in fparentancestors:
1223 fparent1, fparent2 = fparent2, nullid
1223 fparent1, fparent2 = fparent2, nullid
1224 elif fparent2 in fparentancestors:
1224 elif fparent2 in fparentancestors:
1225 fparent2 = nullid
1225 fparent2 = nullid
1226
1226
1227 # is the file changed?
1227 # is the file changed?
1228 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1228 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1229 changelist.append(fname)
1229 changelist.append(fname)
1230 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1230 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1231 # are just the flags changed during merge?
1231 # are just the flags changed during merge?
1232 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1232 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1233 changelist.append(fname)
1233 changelist.append(fname)
1234
1234
1235 return fparent1
1235 return fparent1
1236
1236
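# When _filecommit records a rename, the copy source travels as filelog
# metadata rather than as a real parent; fparent1 is then set to nullid
# ("look up the copy data"). A sketch of the metadata dict it builds, with
# hexlify standing in for Mercurial's hex():
from binascii import hexlify

def copymeta(copysource, copynode):
    return {'copy': copysource, 'copyrev': hexlify(copynode)}

meta = copymeta('foo', b'\x12' * 20)     # 20-byte filelog node of the source
assert meta['copy'] == 'foo'
assert len(meta['copyrev']) == 40        # 40 hex digits for a 20-byte node
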
1237 @unfilteredmethod
1237 @unfilteredmethod
1238 def commit(self, text="", user=None, date=None, match=None, force=False,
1238 def commit(self, text="", user=None, date=None, match=None, force=False,
1239 editor=False, extra={}):
1239 editor=False, extra={}):
1240 """Add a new revision to current repository.
1240 """Add a new revision to current repository.
1241
1241
1242 Revision information is gathered from the working directory;
1242 Revision information is gathered from the working directory;
1243 match can be used to filter the committed files. If editor is
1243 match can be used to filter the committed files. If editor is
1244 supplied, it is called to get a commit message.
1244 supplied, it is called to get a commit message.
1245 """
1245 """
1246
1246
1247 def fail(f, msg):
1247 def fail(f, msg):
1248 raise util.Abort('%s: %s' % (f, msg))
1248 raise util.Abort('%s: %s' % (f, msg))
1249
1249
1250 if not match:
1250 if not match:
1251 match = matchmod.always(self.root, '')
1251 match = matchmod.always(self.root, '')
1252
1252
1253 if not force:
1253 if not force:
1254 vdirs = []
1254 vdirs = []
1255 match.explicitdir = vdirs.append
1255 match.explicitdir = vdirs.append
1256 match.bad = fail
1256 match.bad = fail
1257
1257
1258 wlock = self.wlock()
1258 wlock = self.wlock()
1259 try:
1259 try:
1260 wctx = self[None]
1260 wctx = self[None]
1261 merge = len(wctx.parents()) > 1
1261 merge = len(wctx.parents()) > 1
1262
1262
1263 if (not force and merge and match and
1263 if (not force and merge and match and
1264 (match.files() or match.anypats())):
1264 (match.files() or match.anypats())):
1265 raise util.Abort(_('cannot partially commit a merge '
1265 raise util.Abort(_('cannot partially commit a merge '
1266 '(do not specify files or patterns)'))
1266 '(do not specify files or patterns)'))
1267
1267
1268 status = self.status(match=match, clean=force)
1268 status = self.status(match=match, clean=force)
1269 if force:
1269 if force:
1270 status.modified.extend(status.clean) # mq may commit clean files
1270 status.modified.extend(status.clean) # mq may commit clean files
1271
1271
1272 # check subrepos
1272 # check subrepos
1273 subs = []
1273 subs = []
1274 commitsubs = set()
1274 commitsubs = set()
1275 newstate = wctx.substate.copy()
1275 newstate = wctx.substate.copy()
1276 # only manage subrepos and .hgsubstate if .hgsub is present
1276 # only manage subrepos and .hgsubstate if .hgsub is present
1277 if '.hgsub' in wctx:
1277 if '.hgsub' in wctx:
1278 # we'll decide whether to track this ourselves, thanks
1278 # we'll decide whether to track this ourselves, thanks
1279 for c in status.modified, status.added, status.removed:
1279 for c in status.modified, status.added, status.removed:
1280 if '.hgsubstate' in c:
1280 if '.hgsubstate' in c:
1281 c.remove('.hgsubstate')
1281 c.remove('.hgsubstate')
1282
1282
1283 # compare current state to last committed state
1283 # compare current state to last committed state
1284 # build new substate based on last committed state
1284 # build new substate based on last committed state
1285 oldstate = wctx.p1().substate
1285 oldstate = wctx.p1().substate
1286 for s in sorted(newstate.keys()):
1286 for s in sorted(newstate.keys()):
1287 if not match(s):
1287 if not match(s):
1288 # ignore working copy, use old state if present
1288 # ignore working copy, use old state if present
1289 if s in oldstate:
1289 if s in oldstate:
1290 newstate[s] = oldstate[s]
1290 newstate[s] = oldstate[s]
1291 continue
1291 continue
1292 if not force:
1292 if not force:
1293 raise util.Abort(
1293 raise util.Abort(
1294 _("commit with new subrepo %s excluded") % s)
1294 _("commit with new subrepo %s excluded") % s)
1295 if wctx.sub(s).dirty(True):
1295 if wctx.sub(s).dirty(True):
1296 if not self.ui.configbool('ui', 'commitsubrepos'):
1296 if not self.ui.configbool('ui', 'commitsubrepos'):
1297 raise util.Abort(
1297 raise util.Abort(
1298 _("uncommitted changes in subrepo %s") % s,
1298 _("uncommitted changes in subrepo %s") % s,
1299 hint=_("use --subrepos for recursive commit"))
1299 hint=_("use --subrepos for recursive commit"))
1300 subs.append(s)
1300 subs.append(s)
1301 commitsubs.add(s)
1301 commitsubs.add(s)
1302 else:
1302 else:
1303 bs = wctx.sub(s).basestate()
1303 bs = wctx.sub(s).basestate()
1304 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1304 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1305 if oldstate.get(s, (None, None, None))[1] != bs:
1305 if oldstate.get(s, (None, None, None))[1] != bs:
1306 subs.append(s)
1306 subs.append(s)
1307
1307
1308 # check for removed subrepos
1308 # check for removed subrepos
1309 for p in wctx.parents():
1309 for p in wctx.parents():
1310 r = [s for s in p.substate if s not in newstate]
1310 r = [s for s in p.substate if s not in newstate]
1311 subs += [s for s in r if match(s)]
1311 subs += [s for s in r if match(s)]
1312 if subs:
1312 if subs:
1313 if (not match('.hgsub') and
1313 if (not match('.hgsub') and
1314 '.hgsub' in (wctx.modified() + wctx.added())):
1314 '.hgsub' in (wctx.modified() + wctx.added())):
1315 raise util.Abort(
1315 raise util.Abort(
1316 _("can't commit subrepos without .hgsub"))
1316 _("can't commit subrepos without .hgsub"))
1317 status.modified.insert(0, '.hgsubstate')
1317 status.modified.insert(0, '.hgsubstate')
1318
1318
1319 elif '.hgsub' in status.removed:
1319 elif '.hgsub' in status.removed:
1320 # clean up .hgsubstate when .hgsub is removed
1320 # clean up .hgsubstate when .hgsub is removed
1321 if ('.hgsubstate' in wctx and
1321 if ('.hgsubstate' in wctx and
1322 '.hgsubstate' not in (status.modified + status.added +
1322 '.hgsubstate' not in (status.modified + status.added +
1323 status.removed)):
1323 status.removed)):
1324 status.removed.insert(0, '.hgsubstate')
1324 status.removed.insert(0, '.hgsubstate')
1325
1325
1326 # make sure all explicit patterns are matched
1326 # make sure all explicit patterns are matched
1327 if not force and match.files():
1327 if not force and match.files():
1328 matched = set(status.modified + status.added + status.removed)
1328 matched = set(status.modified + status.added + status.removed)
1329
1329
1330 for f in match.files():
1330 for f in match.files():
1331 f = self.dirstate.normalize(f)
1331 f = self.dirstate.normalize(f)
1332 if f == '.' or f in matched or f in wctx.substate:
1332 if f == '.' or f in matched or f in wctx.substate:
1333 continue
1333 continue
1334 if f in status.deleted:
1334 if f in status.deleted:
1335 fail(f, _('file not found!'))
1335 fail(f, _('file not found!'))
1336 if f in vdirs: # visited directory
1336 if f in vdirs: # visited directory
1337 d = f + '/'
1337 d = f + '/'
1338 for mf in matched:
1338 for mf in matched:
1339 if mf.startswith(d):
1339 if mf.startswith(d):
1340 break
1340 break
1341 else:
1341 else:
1342 fail(f, _("no match under directory!"))
1342 fail(f, _("no match under directory!"))
1343 elif f not in self.dirstate:
1343 elif f not in self.dirstate:
1344 fail(f, _("file not tracked!"))
1344 fail(f, _("file not tracked!"))
1345
1345
1346 cctx = context.workingctx(self, text, user, date, extra, status)
1346 cctx = context.workingcommitctx(self, status,
1347 text, user, date, extra)
1347
1348
1348 if (not force and not extra.get("close") and not merge
1349 if (not force and not extra.get("close") and not merge
1349 and not cctx.files()
1350 and not cctx.files()
1350 and wctx.branch() == wctx.p1().branch()):
1351 and wctx.branch() == wctx.p1().branch()):
1351 return None
1352 return None
1352
1353
1353 if merge and cctx.deleted():
1354 if merge and cctx.deleted():
1354 raise util.Abort(_("cannot commit merge with missing files"))
1355 raise util.Abort(_("cannot commit merge with missing files"))
1355
1356
1356 ms = mergemod.mergestate(self)
1357 ms = mergemod.mergestate(self)
1357 for f in status.modified:
1358 for f in status.modified:
1358 if f in ms and ms[f] == 'u':
1359 if f in ms and ms[f] == 'u':
1359 raise util.Abort(_("unresolved merge conflicts "
1360 raise util.Abort(_("unresolved merge conflicts "
1360 "(see hg help resolve)"))
1361 "(see hg help resolve)"))
1361
1362
1362 if editor:
1363 if editor:
1363 cctx._text = editor(self, cctx, subs)
1364 cctx._text = editor(self, cctx, subs)
1364 edited = (text != cctx._text)
1365 edited = (text != cctx._text)
1365
1366
1366 # Save commit message in case this transaction gets rolled back
1367 # Save commit message in case this transaction gets rolled back
1367 # (e.g. by a pretxncommit hook). Leave the content alone on
1368 # (e.g. by a pretxncommit hook). Leave the content alone on
1368 # the assumption that the user will use the same editor again.
1369 # the assumption that the user will use the same editor again.
1369 msgfn = self.savecommitmessage(cctx._text)
1370 msgfn = self.savecommitmessage(cctx._text)
1370
1371
1371 # commit subs and write new state
1372 # commit subs and write new state
1372 if subs:
1373 if subs:
1373 for s in sorted(commitsubs):
1374 for s in sorted(commitsubs):
1374 sub = wctx.sub(s)
1375 sub = wctx.sub(s)
1375 self.ui.status(_('committing subrepository %s\n') %
1376 self.ui.status(_('committing subrepository %s\n') %
1376 subrepo.subrelpath(sub))
1377 subrepo.subrelpath(sub))
1377 sr = sub.commit(cctx._text, user, date)
1378 sr = sub.commit(cctx._text, user, date)
1378 newstate[s] = (newstate[s][0], sr)
1379 newstate[s] = (newstate[s][0], sr)
1379 subrepo.writestate(self, newstate)
1380 subrepo.writestate(self, newstate)
1380
1381
1381 p1, p2 = self.dirstate.parents()
1382 p1, p2 = self.dirstate.parents()
1382 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1383 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1383 try:
1384 try:
1384 self.hook("precommit", throw=True, parent1=hookp1,
1385 self.hook("precommit", throw=True, parent1=hookp1,
1385 parent2=hookp2)
1386 parent2=hookp2)
1386 ret = self.commitctx(cctx, True)
1387 ret = self.commitctx(cctx, True)
1387 except: # re-raises
1388 except: # re-raises
1388 if edited:
1389 if edited:
1389 self.ui.write(
1390 self.ui.write(
1390 _('note: commit message saved in %s\n') % msgfn)
1391 _('note: commit message saved in %s\n') % msgfn)
1391 raise
1392 raise
1392
1393
1393 # update bookmarks, dirstate and mergestate
1394 # update bookmarks, dirstate and mergestate
1394 bookmarks.update(self, [p1, p2], ret)
1395 bookmarks.update(self, [p1, p2], ret)
1395 cctx.markcommitted(ret)
1396 cctx.markcommitted(ret)
1396 ms.reset()
1397 ms.reset()
1397 finally:
1398 finally:
1398 wlock.release()
1399 wlock.release()
1399
1400
1400 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1401 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1401 # hack for commands that use a temporary commit (e.g. histedit):
1402 # hack for commands that use a temporary commit (e.g. histedit):
1402 # the temporary commit may have been stripped before the hook runs
1403 # the temporary commit may have been stripped before the hook runs
1403 if node in self:
1404 if node in self:
1404 self.hook("commit", node=node, parent1=parent1,
1405 self.hook("commit", node=node, parent1=parent1,
1405 parent2=parent2)
1406 parent2=parent2)
1406 self._afterlock(commithook)
1407 self._afterlock(commithook)
1407 return ret
1408 return ret
1408
1409
1409 @unfilteredmethod
1410 @unfilteredmethod
1410 def commitctx(self, ctx, error=False):
1411 def commitctx(self, ctx, error=False):
1411 """Add a new revision to current repository.
1412 """Add a new revision to current repository.
1412 Revision information is passed via the context argument.
1413 Revision information is passed via the context argument.
1413 """
1414 """
1414
1415
1415 tr = None
1416 tr = None
1416 p1, p2 = ctx.p1(), ctx.p2()
1417 p1, p2 = ctx.p1(), ctx.p2()
1417 user = ctx.user()
1418 user = ctx.user()
1418
1419
1419 lock = self.lock()
1420 lock = self.lock()
1420 try:
1421 try:
1421 tr = self.transaction("commit")
1422 tr = self.transaction("commit")
1422 trp = weakref.proxy(tr)
1423 trp = weakref.proxy(tr)
1423
1424
1424 if ctx.files():
1425 if ctx.files():
1425 m1 = p1.manifest()
1426 m1 = p1.manifest()
1426 m2 = p2.manifest()
1427 m2 = p2.manifest()
1427 m = m1.copy()
1428 m = m1.copy()
1428
1429
1429 # check in files
1430 # check in files
1430 added = []
1431 added = []
1431 changed = []
1432 changed = []
1432 removed = list(ctx.removed())
1433 removed = list(ctx.removed())
1433 linkrev = len(self)
1434 linkrev = len(self)
1434 for f in sorted(ctx.modified() + ctx.added()):
1435 for f in sorted(ctx.modified() + ctx.added()):
1435 self.ui.note(f + "\n")
1436 self.ui.note(f + "\n")
1436 try:
1437 try:
1437 fctx = ctx[f]
1438 fctx = ctx[f]
1438 if fctx is None:
1439 if fctx is None:
1439 removed.append(f)
1440 removed.append(f)
1440 else:
1441 else:
1441 added.append(f)
1442 added.append(f)
1442 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1443 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1443 trp, changed)
1444 trp, changed)
1444 m.setflag(f, fctx.flags())
1445 m.setflag(f, fctx.flags())
1445 except OSError, inst:
1446 except OSError, inst:
1446 self.ui.warn(_("trouble committing %s!\n") % f)
1447 self.ui.warn(_("trouble committing %s!\n") % f)
1447 raise
1448 raise
1448 except IOError, inst:
1449 except IOError, inst:
1449 errcode = getattr(inst, 'errno', errno.ENOENT)
1450 errcode = getattr(inst, 'errno', errno.ENOENT)
1450 if error or errcode and errcode != errno.ENOENT:
1451 if error or errcode and errcode != errno.ENOENT:
1451 self.ui.warn(_("trouble committing %s!\n") % f)
1452 self.ui.warn(_("trouble committing %s!\n") % f)
1452 raise
1453 raise
1453
1454
1454 # update manifest
1455 # update manifest
1455 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1456 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1456 drop = [f for f in removed if f in m]
1457 drop = [f for f in removed if f in m]
1457 for f in drop:
1458 for f in drop:
1458 del m[f]
1459 del m[f]
1459 mn = self.manifest.add(m, trp, linkrev,
1460 mn = self.manifest.add(m, trp, linkrev,
1460 p1.manifestnode(), p2.manifestnode(),
1461 p1.manifestnode(), p2.manifestnode(),
1461 added, drop)
1462 added, drop)
1462 files = changed + removed
1463 files = changed + removed
1463 else:
1464 else:
1464 mn = p1.manifestnode()
1465 mn = p1.manifestnode()
1465 files = []
1466 files = []
1466
1467
1467 # update changelog
1468 # update changelog
1468 self.changelog.delayupdate(tr)
1469 self.changelog.delayupdate(tr)
1469 n = self.changelog.add(mn, files, ctx.description(),
1470 n = self.changelog.add(mn, files, ctx.description(),
1470 trp, p1.node(), p2.node(),
1471 trp, p1.node(), p2.node(),
1471 user, ctx.date(), ctx.extra().copy())
1472 user, ctx.date(), ctx.extra().copy())
1472 p = lambda: tr.writepending() and self.root or ""
1473 p = lambda: tr.writepending() and self.root or ""
1473 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1474 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1474 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1475 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1475 parent2=xp2, pending=p)
1476 parent2=xp2, pending=p)
1476 # set the new commit is proper phase
1476 # set the new commit in its proper phase
1477 # set the new commit in its proper phase
1478 targetphase = subrepo.newcommitphase(self.ui, ctx)
1478 if targetphase:
1479 if targetphase:
1479 # retract boundary do not alter parent changeset.
1479 # retracting the boundary does not alter parent changesets.
1480 # retracting the boundary does not alter parent changesets.
1480 # if a parent has a higher phase, the resulting phase will
1481 # if a parent has a higher phase, the resulting phase will
1481 # be compliant anyway
1482 # be compliant anyway
1482 #
1483 #
1483 # if the minimal phase was 0, we don't need to retract anything
1484 # if the minimal phase was 0, we don't need to retract anything
1485 phases.retractboundary(self, tr, targetphase, [n])
1485 tr.close()
1486 tr.close()
1486 branchmap.updatecache(self.filtered('served'))
1487 branchmap.updatecache(self.filtered('served'))
1487 return n
1488 return n
1488 finally:
1489 finally:
1489 if tr:
1490 if tr:
1490 tr.release()
1491 tr.release()
1491 lock.release()
1492 lock.release()
1492
1493
1493 @unfilteredmethod
1494 @unfilteredmethod
1494 def destroying(self):
1495 def destroying(self):
1495 '''Inform the repository that nodes are about to be destroyed.
1496 '''Inform the repository that nodes are about to be destroyed.
1496 Intended for use by strip and rollback, so there's a common
1497 Intended for use by strip and rollback, so there's a common
1497 place for anything that has to be done before destroying history.
1498 place for anything that has to be done before destroying history.
1498
1499
1499 This is mostly useful for saving state that is in memory and waiting
1500 This is mostly useful for saving state that is in memory and waiting
1500 to be flushed when the current lock is released. Because a call to
1501 to be flushed when the current lock is released. Because a call to
1501 destroyed is imminent, the repo will be invalidated, causing those
1502 destroyed is imminent, the repo will be invalidated, causing those
1502 changes to stay in memory (waiting for the next unlock) or vanish
1503 changes to stay in memory (waiting for the next unlock) or vanish
1503 completely.
1504 completely.
1504 '''
1505 '''
1505 # When using the same lock to commit and strip, the phasecache is left
1506 # When using the same lock to commit and strip, the phasecache is left
1506 # dirty after committing. Then when we strip, the repo is invalidated,
1507 # dirty after committing. Then when we strip, the repo is invalidated,
1507 # causing those changes to disappear.
1508 # causing those changes to disappear.
1508 if '_phasecache' in vars(self):
1509 if '_phasecache' in vars(self):
1509 self._phasecache.write()
1510 self._phasecache.write()
1510
1511
1511 @unfilteredmethod
1512 @unfilteredmethod
1512 def destroyed(self):
1513 def destroyed(self):
1513 '''Inform the repository that nodes have been destroyed.
1514 '''Inform the repository that nodes have been destroyed.
1514 Intended for use by strip and rollback, so there's a common
1515 Intended for use by strip and rollback, so there's a common
1515 place for anything that has to be done after destroying history.
1516 place for anything that has to be done after destroying history.
1516 '''
1517 '''
1517 # When one tries to:
1518 # When one tries to:
1518 # 1) destroy nodes thus calling this method (e.g. strip)
1519 # 1) destroy nodes thus calling this method (e.g. strip)
1519 # 2) use phasecache somewhere (e.g. commit)
1520 # 2) use phasecache somewhere (e.g. commit)
1520 #
1521 #
1521 # then 2) will fail because the phasecache contains nodes that were
1522 # then 2) will fail because the phasecache contains nodes that were
1522 # removed. We can either remove phasecache from the filecache,
1523 # removed. We can either remove phasecache from the filecache,
1523 # causing it to reload next time it is accessed, or simply filter
1524 # causing it to reload next time it is accessed, or simply filter
1524 # the removed nodes now and write the updated cache.
1525 # the removed nodes now and write the updated cache.
1525 self._phasecache.filterunknown(self)
1526 self._phasecache.filterunknown(self)
1526 self._phasecache.write()
1527 self._phasecache.write()
1527
1528
1528 # update the 'served' branch cache to help read-only server processes.
1529 # update the 'served' branch cache to help read-only server processes.
1529 # Thanks to branchcache collaboration this is done from the nearest
1530 # Thanks to branchcache collaboration this is done from the nearest
1530 # filtered subset and it is expected to be fast.
1531 # filtered subset and it is expected to be fast.
1531 branchmap.updatecache(self.filtered('served'))
1532 branchmap.updatecache(self.filtered('served'))
1532
1533
1533 # Ensure the persistent tag cache is updated. Doing it now
1534 # Ensure the persistent tag cache is updated. Doing it now
1534 # means that the tag cache only has to worry about destroyed
1535 # means that the tag cache only has to worry about destroyed
1535 # heads immediately after a strip/rollback. That in turn
1536 # heads immediately after a strip/rollback. That in turn
1536 # guarantees that "cachetip == currenttip" (comparing both rev
1537 # guarantees that "cachetip == currenttip" (comparing both rev
1537 # and node) always means no nodes have been added or destroyed.
1538 # and node) always means no nodes have been added or destroyed.
1538
1539
1539 # XXX this is suboptimal when qrefresh'ing: we strip the current
1540 # XXX this is suboptimal when qrefresh'ing: we strip the current
1540 # head, refresh the tag cache, then immediately add a new head.
1541 # head, refresh the tag cache, then immediately add a new head.
1541 # But I think doing it this way is necessary for the "instant
1542 # But I think doing it this way is necessary for the "instant
1542 # tag cache retrieval" case to work.
1543 # tag cache retrieval" case to work.
1543 self.invalidate()
1544 self.invalidate()
1544
1545
1545 def walk(self, match, node=None):
1546 def walk(self, match, node=None):
1546 '''
1547 '''
1547 walk recursively through the directory tree or a given
1548 walk recursively through the directory tree or a given
1548 changeset, finding all files matched by the match
1549 changeset, finding all files matched by the match
1549 function
1550 function
1550 '''
1551 '''
1551 return self[node].walk(match)
1552 return self[node].walk(match)
1552
1553
1553 def status(self, node1='.', node2=None, match=None,
1554 def status(self, node1='.', node2=None, match=None,
1554 ignored=False, clean=False, unknown=False,
1555 ignored=False, clean=False, unknown=False,
1555 listsubrepos=False):
1556 listsubrepos=False):
1556 '''a convenience method that calls node1.status(node2)'''
1557 '''a convenience method that calls node1.status(node2)'''
1557 return self[node1].status(node2, match, ignored, clean, unknown,
1558 return self[node1].status(node2, match, ignored, clean, unknown,
1558 listsubrepos)
1559 listsubrepos)
1559
1560
1560 def heads(self, start=None):
1561 def heads(self, start=None):
1561 heads = self.changelog.heads(start)
1562 heads = self.changelog.heads(start)
1562 # sort the output in rev descending order
1563 # sort the output in rev descending order
1563 return sorted(heads, key=self.changelog.rev, reverse=True)
1564 return sorted(heads, key=self.changelog.rev, reverse=True)
1564
1565
1565 def branchheads(self, branch=None, start=None, closed=False):
1566 def branchheads(self, branch=None, start=None, closed=False):
1566 '''return a (possibly filtered) list of heads for the given branch
1567 '''return a (possibly filtered) list of heads for the given branch
1567
1568
1568 Heads are returned in topological order, from newest to oldest.
1569 Heads are returned in topological order, from newest to oldest.
1569 If branch is None, use the dirstate branch.
1570 If branch is None, use the dirstate branch.
1570 If start is not None, return only heads reachable from start.
1571 If start is not None, return only heads reachable from start.
1571 If closed is True, return heads that are marked as closed as well.
1572 If closed is True, return heads that are marked as closed as well.
1572 '''
1573 '''
1573 if branch is None:
1574 if branch is None:
1574 branch = self[None].branch()
1575 branch = self[None].branch()
1575 branches = self.branchmap()
1576 branches = self.branchmap()
1576 if branch not in branches:
1577 if branch not in branches:
1577 return []
1578 return []
1578 # the cache returns heads ordered lowest to highest
1579 # the cache returns heads ordered lowest to highest
1579 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1580 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1580 if start is not None:
1581 if start is not None:
1581 # filter out the heads that cannot be reached from startrev
1582 # filter out the heads that cannot be reached from startrev
1582 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1583 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1583 bheads = [h for h in bheads if h in fbheads]
1584 bheads = [h for h in bheads if h in fbheads]
1584 return bheads
1585 return bheads
1585
1586
1586 def branches(self, nodes):
1587 def branches(self, nodes):
1587 if not nodes:
1588 if not nodes:
1588 nodes = [self.changelog.tip()]
1589 nodes = [self.changelog.tip()]
1589 b = []
1590 b = []
1590 for n in nodes:
1591 for n in nodes:
1591 t = n
1592 t = n
1592 while True:
1593 while True:
1593 p = self.changelog.parents(n)
1594 p = self.changelog.parents(n)
1594 if p[1] != nullid or p[0] == nullid:
1595 if p[1] != nullid or p[0] == nullid:
1595 b.append((t, n, p[0], p[1]))
1596 b.append((t, n, p[0], p[1]))
1596 break
1597 break
1597 n = p[0]
1598 n = p[0]
1598 return b
1599 return b
1599
1600
1600 def between(self, pairs):
1601 def between(self, pairs):
1601 r = []
1602 r = []
1602
1603
1603 for top, bottom in pairs:
1604 for top, bottom in pairs:
1604 n, l, i = top, [], 0
1605 n, l, i = top, [], 0
1605 f = 1
1606 f = 1
1606
1607
1607 while n != bottom and n != nullid:
1608 while n != bottom and n != nullid:
1608 p = self.changelog.parents(n)[0]
1609 p = self.changelog.parents(n)[0]
1609 if i == f:
1610 if i == f:
1610 l.append(n)
1611 l.append(n)
1611 f = f * 2
1612 f = f * 2
1612 n = p
1613 n = p
1613 i += 1
1614 i += 1
1614
1615
1615 r.append(l)
1616 r.append(l)
1616
1617
1617 return r
1618 return r
1618
1619
1619 def checkpush(self, pushop):
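# between() samples a linear stretch of history at exponentially growing
# gaps: the step counter i is recorded whenever it reaches f, which then
# doubles. Rerunning the same loop over plain integers, where parent(n) is
# simply n - 1, shows which revisions get picked:
def sample(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom and n >= 0:    # n >= 0 stands in for n != nullid
        p = n - 1                    # stand-in for changelog.parents(n)[0]
        if i == f:
            l.append(n)
            f = f * 2
        n = p
        i += 1
    return l

# distances from top: 1, 2, 4, 8, 16 -> revisions 99, 98, 96, 92, 84
assert sample(100, 80) == [99, 98, 96, 92, 84]
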
1620 def checkpush(self, pushop):
1620 """Extensions can override this function if additional checks have
1621 """Extensions can override this function if additional checks have
1621 to be performed before pushing, or call it if they override push
1622 to be performed before pushing, or call it if they override push
1622 command.
1623 command.
1623 """
1624 """
1624 pass
1625 pass
1625
1626
1626 @unfilteredpropertycache
1627 @unfilteredpropertycache
1627 def prepushoutgoinghooks(self):
1628 def prepushoutgoinghooks(self):
1628 """Return util.hooks consists of "(repo, remote, outgoing)"
1629 """Return util.hooks consists of "(repo, remote, outgoing)"
1629 functions, which are called before pushing changesets.
1630 functions, which are called before pushing changesets.
1630 """
1631 """
1631 return util.hooks()
1632 return util.hooks()
1632
1633
1633 def stream_in(self, remote, requirements):
1634 def stream_in(self, remote, requirements):
1634 lock = self.lock()
1635 lock = self.lock()
1635 try:
1636 try:
1636 # Save remote branchmap. We will use it later
1637 # Save remote branchmap. We will use it later
1637 # to speed up branchcache creation
1638 # to speed up branchcache creation
1638 rbranchmap = None
1639 rbranchmap = None
1639 if remote.capable("branchmap"):
1640 if remote.capable("branchmap"):
1640 rbranchmap = remote.branchmap()
1641 rbranchmap = remote.branchmap()
1641
1642
1642 fp = remote.stream_out()
1643 fp = remote.stream_out()
1643 l = fp.readline()
1644 l = fp.readline()
1644 try:
1645 try:
1645 resp = int(l)
1646 resp = int(l)
1646 except ValueError:
1647 except ValueError:
1647 raise error.ResponseError(
1648 raise error.ResponseError(
1648 _('unexpected response from remote server:'), l)
1649 _('unexpected response from remote server:'), l)
1649 if resp == 1:
1650 if resp == 1:
1650 raise util.Abort(_('operation forbidden by server'))
1651 raise util.Abort(_('operation forbidden by server'))
1651 elif resp == 2:
1652 elif resp == 2:
1652 raise util.Abort(_('locking the remote repository failed'))
1653 raise util.Abort(_('locking the remote repository failed'))
1653 elif resp != 0:
1654 elif resp != 0:
1654 raise util.Abort(_('the server sent an unknown error code'))
1655 raise util.Abort(_('the server sent an unknown error code'))
1655 self.ui.status(_('streaming all changes\n'))
1656 self.ui.status(_('streaming all changes\n'))
1656 l = fp.readline()
1657 l = fp.readline()
1657 try:
1658 try:
1658 total_files, total_bytes = map(int, l.split(' ', 1))
1659 total_files, total_bytes = map(int, l.split(' ', 1))
1659 except (ValueError, TypeError):
1660 except (ValueError, TypeError):
1660 raise error.ResponseError(
1661 raise error.ResponseError(
1661 _('unexpected response from remote server:'), l)
1662 _('unexpected response from remote server:'), l)
1662 self.ui.status(_('%d files to transfer, %s of data\n') %
1663 self.ui.status(_('%d files to transfer, %s of data\n') %
1663 (total_files, util.bytecount(total_bytes)))
1664 (total_files, util.bytecount(total_bytes)))
1664 handled_bytes = 0
1665 handled_bytes = 0
1665 self.ui.progress(_('clone'), 0, total=total_bytes)
1666 self.ui.progress(_('clone'), 0, total=total_bytes)
1666 start = time.time()
1667 start = time.time()
1667
1668
1668 tr = self.transaction(_('clone'))
1669 tr = self.transaction(_('clone'))
1669 try:
1670 try:
1670 for i in xrange(total_files):
1671 for i in xrange(total_files):
1671 # XXX doesn't support '\n' or '\r' in filenames
1672 # XXX doesn't support '\n' or '\r' in filenames
1672 l = fp.readline()
1673 l = fp.readline()
1673 try:
1674 try:
1674 name, size = l.split('\0', 1)
1675 name, size = l.split('\0', 1)
1675 size = int(size)
1676 size = int(size)
1676 except (ValueError, TypeError):
1677 except (ValueError, TypeError):
1677 raise error.ResponseError(
1678 raise error.ResponseError(
1678 _('unexpected response from remote server:'), l)
1679 _('unexpected response from remote server:'), l)
1679 if self.ui.debugflag:
1680 if self.ui.debugflag:
1680 self.ui.debug('adding %s (%s)\n' %
1681 self.ui.debug('adding %s (%s)\n' %
1681 (name, util.bytecount(size)))
1682 (name, util.bytecount(size)))
1682 # for backwards compat, name was partially encoded
1683 # for backwards compat, name was partially encoded
1683 ofp = self.sopener(store.decodedir(name), 'w')
1684 ofp = self.sopener(store.decodedir(name), 'w')
1684 for chunk in util.filechunkiter(fp, limit=size):
1685 for chunk in util.filechunkiter(fp, limit=size):
1685 handled_bytes += len(chunk)
1686 handled_bytes += len(chunk)
1686 self.ui.progress(_('clone'), handled_bytes,
1687 self.ui.progress(_('clone'), handled_bytes,
1687 total=total_bytes)
1688 total=total_bytes)
1688 ofp.write(chunk)
1689 ofp.write(chunk)
1689 ofp.close()
1690 ofp.close()
1690 tr.close()
1691 tr.close()
1691 finally:
1692 finally:
1692 tr.release()
1693 tr.release()
1693
1694
1694 # Writing straight to files circumvented the inmemory caches
1695 # Writing straight to files circumvented the inmemory caches
1695 self.invalidate()
1696 self.invalidate()
1696
1697
1697 elapsed = time.time() - start
1698 elapsed = time.time() - start
1698 if elapsed <= 0:
1699 if elapsed <= 0:
1699 elapsed = 0.001
1700 elapsed = 0.001
1700 self.ui.progress(_('clone'), None)
1701 self.ui.progress(_('clone'), None)
1701 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1702 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1702 (util.bytecount(total_bytes), elapsed,
1703 (util.bytecount(total_bytes), elapsed,
1703 util.bytecount(total_bytes / elapsed)))
1704 util.bytecount(total_bytes / elapsed)))
1704
1705
1705 # new requirements = old non-format requirements +
1706 # new requirements = old non-format requirements +
1706 # new format-related
1707 # new format-related
1707 # requirements from the streamed-in repository
1708 # requirements from the streamed-in repository
1708 requirements.update(set(self.requirements) - self.supportedformats)
1709 requirements.update(set(self.requirements) - self.supportedformats)
1709 self._applyrequirements(requirements)
1710 self._applyrequirements(requirements)
1710 self._writerequirements()
1711 self._writerequirements()
1711
1712
1712 if rbranchmap:
1713 if rbranchmap:
1713 rbheads = []
1714 rbheads = []
1714 closed = []
1715 closed = []
1715 for bheads in rbranchmap.itervalues():
1716 for bheads in rbranchmap.itervalues():
1716 rbheads.extend(bheads)
1717 rbheads.extend(bheads)
1717 for h in bheads:
1718 for h in bheads:
1718 r = self.changelog.rev(h)
1719 r = self.changelog.rev(h)
1719 b, c = self.changelog.branchinfo(r)
1720 b, c = self.changelog.branchinfo(r)
1720 if c:
1721 if c:
1721 closed.append(h)
1722 closed.append(h)
1722
1723
1723 if rbheads:
1724 if rbheads:
1724 rtiprev = max((int(self.changelog.rev(node))
1725 rtiprev = max((int(self.changelog.rev(node))
1725 for node in rbheads))
1726 for node in rbheads))
1726 cache = branchmap.branchcache(rbranchmap,
1727 cache = branchmap.branchcache(rbranchmap,
1727 self[rtiprev].node(),
1728 self[rtiprev].node(),
1728 rtiprev,
1729 rtiprev,
1729 closednodes=closed)
1730 closednodes=closed)
1730 # Try to stick it as low as possible
1731 # Try to stick it as low as possible
1731 # filters above 'served' are unlikely to be fetched from a clone
1732 # filters above 'served' are unlikely to be fetched from a clone
1732 for candidate in ('base', 'immutable', 'served'):
1733 for candidate in ('base', 'immutable', 'served'):
1733 rview = self.filtered(candidate)
1734 rview = self.filtered(candidate)
1734 if cache.validfor(rview):
1735 if cache.validfor(rview):
1735 self._branchcaches[candidate] = cache
1736 self._branchcaches[candidate] = cache
1736 cache.write(rview)
1737 cache.write(rview)
1737 break
1738 break
1738 self.invalidate()
1739 self.invalidate()
1739 return len(self.heads()) + 1
1740 return len(self.heads()) + 1
1740 finally:
1741 finally:
1741 lock.release()
1742 lock.release()
1742
1743
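# A hedged sketch of the wire framing parsed by stream_in above: a status
# line (0 = ok, 1 = operation forbidden, 2 = remote lock failed), then
# "<total_files> <total_bytes>", then per file a "<name>\0<size>" header
# followed by exactly <size> bytes of data. parse_stream is illustrative
# and skips the file bodies instead of writing them out.
from io import BytesIO

def parse_stream(fp):
    resp = int(fp.readline())
    if resp != 0:
        raise ValueError('server refused stream: %d' % resp)
    total_files, total_bytes = map(int, fp.readline().split(None, 1))
    files = []
    for _ in range(total_files):
        name, size = fp.readline().split(b'\0', 1)
        size = int(size)
        files.append((name, size))
        fp.read(size)                # skip the file body itself
    return total_bytes, files

fp = BytesIO(b'0\n1 5\ndata/a.i\x005\nxxxxx')
assert parse_stream(fp) == (5, [(b'data/a.i', 5)])
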
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

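As a reading aid, the capability negotiation that clone() performs before falling back to a normal pull can be summarized as a pure function; this is a hedged sketch (negotiate_stream and its capable parameter are invented, with capable standing in for remote.capable), not code from this module:

def negotiate_stream(capable, supportedformats, heads=None, stream=None):
    # return the requirement set to stream with, or None for a normal pull
    if stream is None:
        stream = bool(capable('stream-preferred'))
    if not stream or heads:
        return None
    if capable('stream'):
        # legacy capability: the remote serves revlogv1 only
        return set(['revlogv1'])
    streamreqs = capable('streamreqs')
    if streamreqs:
        reqs = set(streamreqs.split(','))
        if not reqs - supportedformats:
            # we understand every format the remote will stream
            return reqs
    return None
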
    def pushkey(self, namespace, key, old, new):
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

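The prepushkey hook fired above is an ordinary hook, so an in-process Python hook can veto a key update; here is a sketch under the documented convention that a truthy return value from a pre- hook makes it fail (the function and module names are invented):

# configured as: [hooks] prepushkey.tmp = python:myhooks.rejecttmpbookmarks
def rejecttmpbookmarks(ui, repo, namespace=None, key=None, **kwargs):
    if namespace == 'bookmarks' and key and key.startswith('tmp/'):
        ui.warn('tmp/ bookmarks may not be pushed\n')
        return True  # truthy -> hook fails; with throw=True this aborts
    return False
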
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

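Note that aftertrans closes over plain (vfs, src, dest) tuples rather than the repository itself: holding a repo reference from the transaction callback would create exactly the reference cycle the comment warns about and keep destructors from running.
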
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

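A quick sanity check of the mapping undoname computes, using illustrative POSIX-style store paths:

assert undoname('.hg/store/journal') == '.hg/store/undo'
assert undoname('.hg/store/journal.phaseroots') == '.hg/store/undo.phaseroots'
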
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True