##// END OF EJS Templates
context: translate FilteredIndex/LookupError at repo[changeid] (API)...
Yuya Nishihara -
r37815:43221a57 stable
parent child Browse files
Show More
@@ -1,2598 +1,2600
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirfilenodeids,
25 wdirfilenodeids,
26 wdirid,
26 wdirid,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from . import (
29 from . import (
30 dagop,
30 dagop,
31 encoding,
31 encoding,
32 error,
32 error,
33 fileset,
33 fileset,
34 match as matchmod,
34 match as matchmod,
35 obsolete as obsmod,
35 obsolete as obsmod,
36 patch,
36 patch,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 pycompat,
39 pycompat,
40 repoview,
40 repoview,
41 revlog,
41 revlog,
42 scmutil,
42 scmutil,
43 sparse,
43 sparse,
44 subrepo,
44 subrepo,
45 subrepoutil,
45 subrepoutil,
46 util,
46 util,
47 )
47 )
48 from .utils import (
48 from .utils import (
49 dateutil,
49 dateutil,
50 stringutil,
50 stringutil,
51 )
51 )
52
52
# Shorthand for the cached-property descriptor used by contexts below.
propertycache = util.propertycache

# Predicate returning a truthy match object when the byte string contains
# any byte outside the printable ASCII range 0x21-0x7f (used to decide
# whether a raw binary nodeid should be hex-encoded for display).
nonascii = re.compile(br'[^\x21-\x7f]').search
56
56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # when they denote the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override
        the match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Mapping of subrepo path -> (source, revision, kind) parsed from
        # .hgsub/.hgsubstate for this context.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # Single-parent changesets get the null revision as second parent.
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Prefer whatever manifest data is already cached on this instance
        # before falling back to a full manifest read.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
379
379
def changectxdeprecwarn(repo):
    """Emit a deprecation warning for soon-to-be-removed changectx() forms.

    changectx's constructor will soon lose support for these forms of
    changeids:
    * stringified ints
    * bookmarks, tags, branches, and other namespace identifiers
    * hex nodeid prefixes

    Depending on your use case, replace repo[x] by one of these:
    * If you want to support general revsets, use scmutil.revsingle(x)
    * If you know that "x" is a stringified int, use repo[int(x)]
    * If you know that "x" is a bookmark, use repo._bookmarks.changectx(x)
    * If you know that "x" is a tag, use repo[repo.tags()[x]]
    * If you know that "x" is a branch or in some other namespace,
      use the appropriate mechanism for that namespace
    * If you know that "x" is a hex nodeid prefix, use
      repo[scmutil.resolvehexnodeidprefix(repo, x)]
    * If "x" is a string that can be any of the above, but you don't want
      to allow general revsets (perhaps because "x" may come from a remote
      user and the revset may be too costly), use scmutil.revsymbol(repo, x)
    * If "x" can be a mix of the above, you'll have to figure it out
      yourself
    """
    # stacklevel=4 points the warning at the caller of repo[x], not at
    # this helper or changectx.__init__.
    repo.ui.deprecwarn("changectx.__init__ is getting more limited, see "
                       "context.changectxdeprecwarn() for details", "4.6",
                       stacklevel=4)
404
404
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        try:
            # Fast paths first: int revnum, well-known symbols, and the
            # dirstate parent, all of which avoid expensive lookups.
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # possibly a raw binary nodeid
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # stringified integer revision number (deprecated form)
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                changectxdeprecwarn(repo)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # full hex nodeid
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                changectxdeprecwarn(repo)
                return
            except KeyError:
                pass

            # hex nodeid prefix (deprecated form)
            self._node = scmutil.resolvehexnodeidprefix(repo, changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                changectxdeprecwarn(repo)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # binary nodeids are rendered as hex in the error message
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError):
            # Translate low-level filtered errors into a repo-level lookup
            # error so callers see a consistent exception type (API change
            # of this revision).
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % changeid)
        except error.FilteredRepoLookupError:
            raise
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
507
509
508 def __hash__(self):
510 def __hash__(self):
509 try:
511 try:
510 return hash(self._rev)
512 return hash(self._rev)
511 except AttributeError:
513 except AttributeError:
512 return id(self)
514 return id(self)
513
515
514 def __nonzero__(self):
516 def __nonzero__(self):
515 return self._rev != nullrev
517 return self._rev != nullrev
516
518
517 __bool__ = __nonzero__
519 __bool__ = __nonzero__
518
520
519 @propertycache
521 @propertycache
520 def _changeset(self):
522 def _changeset(self):
521 return self._repo.changelog.changelogrevision(self.rev())
523 return self._repo.changelog.changelogrevision(self.rev())
522
524
523 @propertycache
525 @propertycache
524 def _manifest(self):
526 def _manifest(self):
525 return self._manifestctx.read()
527 return self._manifestctx.read()
526
528
527 @property
529 @property
528 def _manifestctx(self):
530 def _manifestctx(self):
529 return self._repo.manifestlog[self._changeset.manifest]
531 return self._repo.manifestlog[self._changeset.manifest]
530
532
531 @propertycache
533 @propertycache
532 def _manifestdelta(self):
534 def _manifestdelta(self):
533 return self._manifestctx.readdelta()
535 return self._manifestctx.readdelta()
534
536
535 @propertycache
537 @propertycache
536 def _parents(self):
538 def _parents(self):
537 repo = self._repo
539 repo = self._repo
538 p1, p2 = repo.changelog.parentrevs(self._rev)
540 p1, p2 = repo.changelog.parentrevs(self._rev)
539 if p2 == nullrev:
541 if p2 == nullrev:
540 return [changectx(repo, p1)]
542 return [changectx(repo, p1)]
541 return [changectx(repo, p1), changectx(repo, p2)]
543 return [changectx(repo, p1), changectx(repo, p2)]
542
544
543 def changeset(self):
545 def changeset(self):
544 c = self._changeset
546 c = self._changeset
545 return (
547 return (
546 c.manifest,
548 c.manifest,
547 c.user,
549 c.user,
548 c.date,
550 c.date,
549 c.files,
551 c.files,
550 c.description,
552 c.description,
551 c.extra,
553 c.extra,
552 )
554 )
553 def manifestnode(self):
555 def manifestnode(self):
554 return self._changeset.manifest
556 return self._changeset.manifest
555
557
556 def user(self):
558 def user(self):
557 return self._changeset.user
559 return self._changeset.user
558 def date(self):
560 def date(self):
559 return self._changeset.date
561 return self._changeset.date
560 def files(self):
562 def files(self):
561 return self._changeset.files
563 return self._changeset.files
562 def description(self):
564 def description(self):
563 return self._changeset.description
565 return self._changeset.description
564 def branch(self):
566 def branch(self):
565 return encoding.tolocal(self._changeset.extra.get("branch"))
567 return encoding.tolocal(self._changeset.extra.get("branch"))
566 def closesbranch(self):
568 def closesbranch(self):
567 return 'close' in self._changeset.extra
569 return 'close' in self._changeset.extra
568 def extra(self):
570 def extra(self):
569 """Return a dict of extra information."""
571 """Return a dict of extra information."""
570 return self._changeset.extra
572 return self._changeset.extra
571 def tags(self):
573 def tags(self):
572 """Return a list of byte tag names"""
574 """Return a list of byte tag names"""
573 return self._repo.nodetags(self._node)
575 return self._repo.nodetags(self._node)
574 def bookmarks(self):
576 def bookmarks(self):
575 """Return a list of byte bookmark names."""
577 """Return a list of byte bookmark names."""
576 return self._repo.nodebookmarks(self._node)
578 return self._repo.nodebookmarks(self._node)
577 def phase(self):
579 def phase(self):
578 return self._repo._phasecache.phase(self._repo, self._rev)
580 return self._repo._phasecache.phase(self._repo, self._rev)
579 def hidden(self):
581 def hidden(self):
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
581
583
582 def isinmemory(self):
584 def isinmemory(self):
583 return False
585 return False
584
586
585 def children(self):
587 def children(self):
586 """return list of changectx contexts for each child changeset.
588 """return list of changectx contexts for each child changeset.
587
589
588 This returns only the immediate child changesets. Use descendants() to
590 This returns only the immediate child changesets. Use descendants() to
589 recursively walk children.
591 recursively walk children.
590 """
592 """
591 c = self._repo.changelog.children(self._node)
593 c = self._repo.changelog.children(self._node)
592 return [changectx(self._repo, x) for x in c]
594 return [changectx(self._repo, x) for x in c]
593
595
594 def ancestors(self):
596 def ancestors(self):
595 for a in self._repo.changelog.ancestors([self._rev]):
597 for a in self._repo.changelog.ancestors([self._rev]):
596 yield changectx(self._repo, a)
598 yield changectx(self._repo, a)
597
599
598 def descendants(self):
600 def descendants(self):
599 """Recursively yield all children of the changeset.
601 """Recursively yield all children of the changeset.
600
602
601 For just the immediate children, use children()
603 For just the immediate children, use children()
602 """
604 """
603 for d in self._repo.changelog.descendants([self._rev]):
605 for d in self._repo.changelog.descendants([self._rev]):
604 yield changectx(self._repo, d)
606 yield changectx(self._repo, d)
605
607
606 def filectx(self, path, fileid=None, filelog=None):
608 def filectx(self, path, fileid=None, filelog=None):
607 """get a file context from this changeset"""
609 """get a file context from this changeset"""
608 if fileid is None:
610 if fileid is None:
609 fileid = self.filenode(path)
611 fileid = self.filenode(path)
610 return filectx(self._repo, path, fileid=fileid,
612 return filectx(self._repo, path, fileid=fileid,
611 changectx=self, filelog=filelog)
613 changectx=self, filelog=filelog)
612
614
613 def ancestor(self, c2, warn=False):
615 def ancestor(self, c2, warn=False):
614 """return the "best" ancestor context of self and c2
616 """return the "best" ancestor context of self and c2
615
617
616 If there are multiple candidates, it will show a message and check
618 If there are multiple candidates, it will show a message and check
617 merge.preferancestor configuration before falling back to the
619 merge.preferancestor configuration before falling back to the
618 revlog ancestor."""
620 revlog ancestor."""
619 # deal with workingctxs
621 # deal with workingctxs
620 n2 = c2._node
622 n2 = c2._node
621 if n2 is None:
623 if n2 is None:
622 n2 = c2._parents[0]._node
624 n2 = c2._parents[0]._node
623 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
625 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
624 if not cahs:
626 if not cahs:
625 anc = nullid
627 anc = nullid
626 elif len(cahs) == 1:
628 elif len(cahs) == 1:
627 anc = cahs[0]
629 anc = cahs[0]
628 else:
630 else:
629 # experimental config: merge.preferancestor
631 # experimental config: merge.preferancestor
630 for r in self._repo.ui.configlist('merge', 'preferancestor'):
632 for r in self._repo.ui.configlist('merge', 'preferancestor'):
631 try:
633 try:
632 ctx = scmutil.revsymbol(self._repo, r)
634 ctx = scmutil.revsymbol(self._repo, r)
633 except error.RepoLookupError:
635 except error.RepoLookupError:
634 continue
636 continue
635 anc = ctx.node()
637 anc = ctx.node()
636 if anc in cahs:
638 if anc in cahs:
637 break
639 break
638 else:
640 else:
639 anc = self._repo.changelog.ancestor(self._node, n2)
641 anc = self._repo.changelog.ancestor(self._node, n2)
640 if warn:
642 if warn:
641 self._repo.ui.status(
643 self._repo.ui.status(
642 (_("note: using %s as ancestor of %s and %s\n") %
644 (_("note: using %s as ancestor of %s and %s\n") %
643 (short(anc), short(self._node), short(n2))) +
645 (short(anc), short(self._node), short(n2))) +
644 ''.join(_(" alternatively, use --config "
646 ''.join(_(" alternatively, use --config "
645 "merge.preferancestor=%s\n") %
647 "merge.preferancestor=%s\n") %
646 short(n) for n in sorted(cahs) if n != anc))
648 short(n) for n in sorted(cahs) if n != anc))
647 return changectx(self._repo, anc)
649 return changectx(self._repo, anc)
648
650
649 def descendant(self, other):
651 def descendant(self, other):
650 """True if other is descendant of this changeset"""
652 """True if other is descendant of this changeset"""
651 return self._repo.changelog.descendant(self._rev, other._rev)
653 return self._repo.changelog.descendant(self._rev, other._rev)
652
654
653 def walk(self, match):
655 def walk(self, match):
654 '''Generates matching file names.'''
656 '''Generates matching file names.'''
655
657
656 # Wrap match.bad method to have message with nodeid
658 # Wrap match.bad method to have message with nodeid
657 def bad(fn, msg):
659 def bad(fn, msg):
658 # The manifest doesn't know about subrepos, so don't complain about
660 # The manifest doesn't know about subrepos, so don't complain about
659 # paths into valid subrepos.
661 # paths into valid subrepos.
660 if any(fn == s or fn.startswith(s + '/')
662 if any(fn == s or fn.startswith(s + '/')
661 for s in self.substate):
663 for s in self.substate):
662 return
664 return
663 match.bad(fn, _('no such file in rev %s') % self)
665 match.bad(fn, _('no such file in rev %s') % self)
664
666
665 m = matchmod.badmatch(match, bad)
667 m = matchmod.badmatch(match, bad)
666 return self._manifest.walk(m)
668 return self._manifest.walk(m)
667
669
668 def matches(self, match):
670 def matches(self, match):
669 return self.walk(match)
671 return self.walk(match)
670
672
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # revlog holding every revision of this file's content
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # prefer explicitly attached revision information over raw linkrev
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # a filectx is truthy iff its file node can be resolved
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash on (path, filenode) when resolvable, else on identity
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # -- simple accessors -------------------------------------------------
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # raw linkrev; see introrev() for the linkrev-shadowing-aware version
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """Return parent filectxs, substituting rename source when present."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # synthesize a null second parent
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs, newest (linkrev, filenode) first."""
        visit = {}
        c = self
        # follow only the first parent when requested
        cut = 1 if followfirst else None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
1021
1023
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning down the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # only seed the attributes that were explicitly provided; the
        # rest are computed lazily by the corresponding propertycaches
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx backing this file revision, resolved from _changeid
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """Return the raw (undecoded, possibly flagged) revlog revision."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content at this revision.

        Censored nodes either yield an empty string (when
        censor.policy=ignore) or abort with a hint.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """Return the size of this file revision as stored in the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        # linkrev points back here: the rename was introduced by this
        # changeset, so report it
        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # if either parent already holds this exact file revision, the
        # rename happened earlier; report no copy for this changeset
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # parent does not have the file at all
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1127
1129
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # not committed yet, so no node/rev has been assigned
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            # pre-computed status tuple; otherwise the _status
            # propertycache falls back to repo.status()
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "<p1 short hash>+" to mark an uncommitted state
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # flag lookup that prefers the filesystem, falling back to the
        # manifest-based function above
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # lazily computed when no explicit `changes` were passed in
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin the commit date
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        # no manifest exists until the context is committed
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """Return the sorted list of files touched by this context."""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        # an uncommitted context carries no tags
        return []

    def bookmarks(self):
        """Return bookmarks of all parents, concatenated."""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never lower than draft, and at least as high as any parent
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for `path`, or '' if unknown."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """Yield the parent contexts, then all ancestor changectxs."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1328
1330
1329 class workingctx(committablectx):
1331 class workingctx(committablectx):
1330 """A workingctx object makes access to data related to
1332 """A workingctx object makes access to data related to
1331 the current working directory convenient.
1333 the current working directory convenient.
1332 date - any valid date string or (unixtime, offset), or None.
1334 date - any valid date string or (unixtime, offset), or None.
1333 user - username string, or None.
1335 user - username string, or None.
1334 extra - a dictionary of extra values, or None.
1336 extra - a dictionary of extra values, or None.
1335 changes - a list of file lists as returned by localrepo.status()
1337 changes - a list of file lists as returned by localrepo.status()
1336 or None to use the repository status.
1338 or None to use the repository status.
1337 """
1339 """
1338 def __init__(self, repo, text="", user=None, date=None, extra=None,
1340 def __init__(self, repo, text="", user=None, date=None, extra=None,
1339 changes=None):
1341 changes=None):
1340 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1342 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1341
1343
1342 def __iter__(self):
1344 def __iter__(self):
1343 d = self._repo.dirstate
1345 d = self._repo.dirstate
1344 for f in d:
1346 for f in d:
1345 if d[f] != 'r':
1347 if d[f] != 'r':
1346 yield f
1348 yield f
1347
1349
1348 def __contains__(self, key):
1350 def __contains__(self, key):
1349 return self._repo.dirstate[key] not in "?r"
1351 return self._repo.dirstate[key] not in "?r"
1350
1352
    def hex(self):
        # the working directory is identified by the constant wdirid
        # pseudo-node rather than a real changeset hash
        return hex(wdirid)
1353
1355
1354 @propertycache
1356 @propertycache
1355 def _parents(self):
1357 def _parents(self):
1356 p = self._repo.dirstate.parents()
1358 p = self._repo.dirstate.parents()
1357 if p[1] == nullid:
1359 if p[1] == nullid:
1358 p = p[:-1]
1360 p = p[:-1]
1359 return [changectx(self._repo, x) for x in p]
1361 return [changectx(self._repo, x) for x in p]
1360
1362
    def _fileinfo(self, path):
        """Look up (filenode, flags) for `path`, forcing the manifest first."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1365
1367
1366 def filectx(self, path, filelog=None):
1368 def filectx(self, path, filelog=None):
1367 """get a file context from the working directory"""
1369 """get a file context from the working directory"""
1368 return workingfilectx(self._repo, path, workingctx=self,
1370 return workingfilectx(self._repo, path, workingctx=self,
1369 filelog=filelog)
1371 filelog=filelog)
1370
1372
1371 def dirty(self, missing=False, merge=True, branch=True):
1373 def dirty(self, missing=False, merge=True, branch=True):
1372 "check whether a working directory is modified"
1374 "check whether a working directory is modified"
1373 # check subrepos first
1375 # check subrepos first
1374 for s in sorted(self.substate):
1376 for s in sorted(self.substate):
1375 if self.sub(s).dirty(missing=missing):
1377 if self.sub(s).dirty(missing=missing):
1376 return True
1378 return True
1377 # check current working dir
1379 # check current working dir
1378 return ((merge and self.p2()) or
1380 return ((merge and self.p2()) or
1379 (branch and self.branch() != self.p1().branch()) or
1381 (branch and self.branch() != self.p1().branch()) or
1380 self.modified() or self.added() or self.removed() or
1382 self.modified() or self.added() or self.removed() or
1381 (missing and self.deleted()))
1383 (missing and self.deleted()))
1382
1384
    def add(self, list, prefix=""):
        """Schedule the given files for addition to the dirstate.

        Returns the list of files that were rejected (missing, or not a
        regular file/symlink). Warnings are emitted for rejected files,
        very large files, and files that are already tracked.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    # file vanished (or was never there): reject it
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still add: large files are expensive to manage
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added/merged/normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: re-adding means "keep tracking"
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1417
1419
1418 def forget(self, files, prefix=""):
1420 def forget(self, files, prefix=""):
1419 with self._repo.wlock():
1421 with self._repo.wlock():
1420 ds = self._repo.dirstate
1422 ds = self._repo.dirstate
1421 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1423 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1422 rejected = []
1424 rejected = []
1423 for f in files:
1425 for f in files:
1424 if f not in self._repo.dirstate:
1426 if f not in self._repo.dirstate:
1425 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1427 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1426 rejected.append(f)
1428 rejected.append(f)
1427 elif self._repo.dirstate[f] != 'a':
1429 elif self._repo.dirstate[f] != 'a':
1428 self._repo.dirstate.remove(f)
1430 self._repo.dirstate.remove(f)
1429 else:
1431 else:
1430 self._repo.dirstate.drop(f)
1432 self._repo.dirstate.drop(f)
1431 return rejected
1433 return rejected
1432
1434
1433 def undelete(self, list):
1435 def undelete(self, list):
1434 pctxs = self.parents()
1436 pctxs = self.parents()
1435 with self._repo.wlock():
1437 with self._repo.wlock():
1436 ds = self._repo.dirstate
1438 ds = self._repo.dirstate
1437 for f in list:
1439 for f in list:
1438 if self._repo.dirstate[f] != 'r':
1440 if self._repo.dirstate[f] != 'r':
1439 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1441 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1440 else:
1442 else:
1441 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1443 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1442 t = fctx.data()
1444 t = fctx.data()
1443 self._repo.wwrite(f, t, fctx.flags())
1445 self._repo.wwrite(f, t, fctx.flags())
1444 self._repo.dirstate.normal(f)
1446 self._repo.dirstate.normal(f)
1445
1447
1446 def copy(self, source, dest):
1448 def copy(self, source, dest):
1447 try:
1449 try:
1448 st = self._repo.wvfs.lstat(dest)
1450 st = self._repo.wvfs.lstat(dest)
1449 except OSError as err:
1451 except OSError as err:
1450 if err.errno != errno.ENOENT:
1452 if err.errno != errno.ENOENT:
1451 raise
1453 raise
1452 self._repo.ui.warn(_("%s does not exist!\n")
1454 self._repo.ui.warn(_("%s does not exist!\n")
1453 % self._repo.dirstate.pathto(dest))
1455 % self._repo.dirstate.pathto(dest))
1454 return
1456 return
1455 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1457 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1456 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1458 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1457 "symbolic link\n")
1459 "symbolic link\n")
1458 % self._repo.dirstate.pathto(dest))
1460 % self._repo.dirstate.pathto(dest))
1459 else:
1461 else:
1460 with self._repo.wlock():
1462 with self._repo.wlock():
1461 if self._repo.dirstate[dest] in '?':
1463 if self._repo.dirstate[dest] in '?':
1462 self._repo.dirstate.add(dest)
1464 self._repo.dirstate.add(dest)
1463 elif self._repo.dirstate[dest] in 'r':
1465 elif self._repo.dirstate[dest] in 'r':
1464 self._repo.dirstate.normallookup(dest)
1466 self._repo.dirstate.normallookup(dest)
1465 self._repo.dirstate.copy(source, dest)
1467 self._repo.dirstate.copy(source, dest)
1466
1468
1467 def match(self, pats=None, include=None, exclude=None, default='glob',
1469 def match(self, pats=None, include=None, exclude=None, default='glob',
1468 listsubrepos=False, badfn=None):
1470 listsubrepos=False, badfn=None):
1469 r = self._repo
1471 r = self._repo
1470
1472
1471 # Only a case insensitive filesystem needs magic to translate user input
1473 # Only a case insensitive filesystem needs magic to translate user input
1472 # to actual case in the filesystem.
1474 # to actual case in the filesystem.
1473 icasefs = not util.fscasesensitive(r.root)
1475 icasefs = not util.fscasesensitive(r.root)
1474 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1476 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1475 default, auditor=r.auditor, ctx=self,
1477 default, auditor=r.auditor, ctx=self,
1476 listsubrepos=listsubrepos, badfn=badfn,
1478 listsubrepos=listsubrepos, badfn=badfn,
1477 icasefs=icasefs)
1479 icasefs=icasefs)
1478
1480
1479 def _filtersuspectsymlink(self, files):
1481 def _filtersuspectsymlink(self, files):
1480 if not files or self._repo.dirstate._checklink:
1482 if not files or self._repo.dirstate._checklink:
1481 return files
1483 return files
1482
1484
1483 # Symlink placeholders may get non-symlink-like contents
1485 # Symlink placeholders may get non-symlink-like contents
1484 # via user error or dereferencing by NFS or Samba servers,
1486 # via user error or dereferencing by NFS or Samba servers,
1485 # so we filter out any placeholders that don't look like a
1487 # so we filter out any placeholders that don't look like a
1486 # symlink
1488 # symlink
1487 sane = []
1489 sane = []
1488 for f in files:
1490 for f in files:
1489 if self.flags(f) == 'l':
1491 if self.flags(f) == 'l':
1490 d = self[f].data()
1492 d = self[f].data()
1491 if (d == '' or len(d) >= 1024 or '\n' in d
1493 if (d == '' or len(d) >= 1024 or '\n' in d
1492 or stringutil.binary(d)):
1494 or stringutil.binary(d)):
1493 self._repo.ui.debug('ignoring suspect symlink placeholder'
1495 self._repo.ui.debug('ignoring suspect symlink placeholder'
1494 ' "%s"\n' % f)
1496 ' "%s"\n' % f)
1495 continue
1497 continue
1496 sane.append(f)
1498 sane.append(f)
1497 return sane
1499 return sane
1498
1500
    def _checklookup(self, files):
        """Re-check files whose dirstate entry is ambiguous.

        Returns a (modified, deleted, fixup) triple: `modified` files
        whose content or flags differ from the first parent, `deleted`
        files that became unreadable, and `fixup` files that are in fact
        clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1528
1530
1529 def _poststatusfixup(self, status, fixup):
1531 def _poststatusfixup(self, status, fixup):
1530 """update dirstate for files that are actually clean"""
1532 """update dirstate for files that are actually clean"""
1531 poststatus = self._repo.postdsstatus()
1533 poststatus = self._repo.postdsstatus()
1532 if fixup or poststatus:
1534 if fixup or poststatus:
1533 try:
1535 try:
1534 oldid = self._repo.dirstate.identity()
1536 oldid = self._repo.dirstate.identity()
1535
1537
1536 # updating the dirstate is optional
1538 # updating the dirstate is optional
1537 # so we don't wait on the lock
1539 # so we don't wait on the lock
1538 # wlock can invalidate the dirstate, so cache normal _after_
1540 # wlock can invalidate the dirstate, so cache normal _after_
1539 # taking the lock
1541 # taking the lock
1540 with self._repo.wlock(False):
1542 with self._repo.wlock(False):
1541 if self._repo.dirstate.identity() == oldid:
1543 if self._repo.dirstate.identity() == oldid:
1542 if fixup:
1544 if fixup:
1543 normal = self._repo.dirstate.normal
1545 normal = self._repo.dirstate.normal
1544 for f in fixup:
1546 for f in fixup:
1545 normal(f)
1547 normal(f)
1546 # write changes out explicitly, because nesting
1548 # write changes out explicitly, because nesting
1547 # wlock at runtime may prevent 'wlock.release()'
1549 # wlock at runtime may prevent 'wlock.release()'
1548 # after this block from doing so for subsequent
1550 # after this block from doing so for subsequent
1549 # changing files
1551 # changing files
1550 tr = self._repo.currenttransaction()
1552 tr = self._repo.currenttransaction()
1551 self._repo.dirstate.write(tr)
1553 self._repo.dirstate.write(tr)
1552
1554
1553 if poststatus:
1555 if poststatus:
1554 for ps in poststatus:
1556 for ps in poststatus:
1555 ps(self, status)
1557 ps(self, status)
1556 else:
1558 else:
1557 # in this case, writing changes out breaks
1559 # in this case, writing changes out breaks
1558 # consistency, because .hg/dirstate was
1560 # consistency, because .hg/dirstate was
1559 # already changed simultaneously after last
1561 # already changed simultaneously after last
1560 # caching (see also issue5584 for detail)
1562 # caching (see also issue5584 for detail)
1561 self._repo.ui.debug('skip updating dirstate: '
1563 self._repo.ui.debug('skip updating dirstate: '
1562 'identity mismatch\n')
1564 'identity mismatch\n')
1563 except error.LockError:
1565 except error.LockError:
1564 pass
1566 pass
1565 finally:
1567 finally:
1566 # Even if the wlock couldn't be grabbed, clear out the list.
1568 # Even if the wlock couldn't be grabbed, clear out the list.
1567 self._repo.clearpostdsstatus()
1569 self._repo.clearpostdsstatus()
1568
1570
1569 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1571 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1570 '''Gets the status from the dirstate -- internal use only.'''
1572 '''Gets the status from the dirstate -- internal use only.'''
1571 subrepos = []
1573 subrepos = []
1572 if '.hgsub' in self:
1574 if '.hgsub' in self:
1573 subrepos = sorted(self.substate)
1575 subrepos = sorted(self.substate)
1574 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1576 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1575 clean=clean, unknown=unknown)
1577 clean=clean, unknown=unknown)
1576
1578
1577 # check for any possibly clean files
1579 # check for any possibly clean files
1578 fixup = []
1580 fixup = []
1579 if cmp:
1581 if cmp:
1580 modified2, deleted2, fixup = self._checklookup(cmp)
1582 modified2, deleted2, fixup = self._checklookup(cmp)
1581 s.modified.extend(modified2)
1583 s.modified.extend(modified2)
1582 s.deleted.extend(deleted2)
1584 s.deleted.extend(deleted2)
1583
1585
1584 if fixup and clean:
1586 if fixup and clean:
1585 s.clean.extend(fixup)
1587 s.clean.extend(fixup)
1586
1588
1587 self._poststatusfixup(s, fixup)
1589 self._poststatusfixup(s, fixup)
1588
1590
1589 if match.always():
1591 if match.always():
1590 # cache for performance
1592 # cache for performance
1591 if s.unknown or s.ignored or s.clean:
1593 if s.unknown or s.ignored or s.clean:
1592 # "_status" is cached with list*=False in the normal route
1594 # "_status" is cached with list*=False in the normal route
1593 self._status = scmutil.status(s.modified, s.added, s.removed,
1595 self._status = scmutil.status(s.modified, s.added, s.removed,
1594 s.deleted, [], [], [])
1596 s.deleted, [], [], [])
1595 else:
1597 else:
1596 self._status = s
1598 self._status = s
1597
1599
1598 return s
1600 return s
1599
1601
1600 @propertycache
1602 @propertycache
1601 def _manifest(self):
1603 def _manifest(self):
1602 """generate a manifest corresponding to the values in self._status
1604 """generate a manifest corresponding to the values in self._status
1603
1605
1604 This reuse the file nodeid from parent, but we use special node
1606 This reuse the file nodeid from parent, but we use special node
1605 identifiers for added and modified files. This is used by manifests
1607 identifiers for added and modified files. This is used by manifests
1606 merge to see that files are different and by update logic to avoid
1608 merge to see that files are different and by update logic to avoid
1607 deleting newly added files.
1609 deleting newly added files.
1608 """
1610 """
1609 return self._buildstatusmanifest(self._status)
1611 return self._buildstatusmanifest(self._status)
1610
1612
1611 def _buildstatusmanifest(self, status):
1613 def _buildstatusmanifest(self, status):
1612 """Builds a manifest that includes the given status results."""
1614 """Builds a manifest that includes the given status results."""
1613 parents = self.parents()
1615 parents = self.parents()
1614
1616
1615 man = parents[0].manifest().copy()
1617 man = parents[0].manifest().copy()
1616
1618
1617 ff = self._flagfunc
1619 ff = self._flagfunc
1618 for i, l in ((addednodeid, status.added),
1620 for i, l in ((addednodeid, status.added),
1619 (modifiednodeid, status.modified)):
1621 (modifiednodeid, status.modified)):
1620 for f in l:
1622 for f in l:
1621 man[f] = i
1623 man[f] = i
1622 try:
1624 try:
1623 man.setflag(f, ff(f))
1625 man.setflag(f, ff(f))
1624 except OSError:
1626 except OSError:
1625 pass
1627 pass
1626
1628
1627 for f in status.deleted + status.removed:
1629 for f in status.deleted + status.removed:
1628 if f in man:
1630 if f in man:
1629 del man[f]
1631 del man[f]
1630
1632
1631 return man
1633 return man
1632
1634
1633 def _buildstatus(self, other, s, match, listignored, listclean,
1635 def _buildstatus(self, other, s, match, listignored, listclean,
1634 listunknown):
1636 listunknown):
1635 """build a status with respect to another context
1637 """build a status with respect to another context
1636
1638
1637 This includes logic for maintaining the fast path of status when
1639 This includes logic for maintaining the fast path of status when
1638 comparing the working directory against its parent, which is to skip
1640 comparing the working directory against its parent, which is to skip
1639 building a new manifest if self (working directory) is not comparing
1641 building a new manifest if self (working directory) is not comparing
1640 against its parent (repo['.']).
1642 against its parent (repo['.']).
1641 """
1643 """
1642 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1644 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1643 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1645 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1644 # might have accidentally ended up with the entire contents of the file
1646 # might have accidentally ended up with the entire contents of the file
1645 # they are supposed to be linking to.
1647 # they are supposed to be linking to.
1646 s.modified[:] = self._filtersuspectsymlink(s.modified)
1648 s.modified[:] = self._filtersuspectsymlink(s.modified)
1647 if other != self._repo['.']:
1649 if other != self._repo['.']:
1648 s = super(workingctx, self)._buildstatus(other, s, match,
1650 s = super(workingctx, self)._buildstatus(other, s, match,
1649 listignored, listclean,
1651 listignored, listclean,
1650 listunknown)
1652 listunknown)
1651 return s
1653 return s
1652
1654
1653 def _matchstatus(self, other, match):
1655 def _matchstatus(self, other, match):
1654 """override the match method with a filter for directory patterns
1656 """override the match method with a filter for directory patterns
1655
1657
1656 We use inheritance to customize the match.bad method only in cases of
1658 We use inheritance to customize the match.bad method only in cases of
1657 workingctx since it belongs only to the working directory when
1659 workingctx since it belongs only to the working directory when
1658 comparing against the parent changeset.
1660 comparing against the parent changeset.
1659
1661
1660 If we aren't comparing against the working directory's parent, then we
1662 If we aren't comparing against the working directory's parent, then we
1661 just use the default match object sent to us.
1663 just use the default match object sent to us.
1662 """
1664 """
1663 if other != self._repo['.']:
1665 if other != self._repo['.']:
1664 def bad(f, msg):
1666 def bad(f, msg):
1665 # 'f' may be a directory pattern from 'match.files()',
1667 # 'f' may be a directory pattern from 'match.files()',
1666 # so 'f not in ctx1' is not enough
1668 # so 'f not in ctx1' is not enough
1667 if f not in other and not other.hasdir(f):
1669 if f not in other and not other.hasdir(f):
1668 self._repo.ui.warn('%s: %s\n' %
1670 self._repo.ui.warn('%s: %s\n' %
1669 (self._repo.dirstate.pathto(f), msg))
1671 (self._repo.dirstate.pathto(f), msg))
1670 match.bad = bad
1672 match.bad = bad
1671 return match
1673 return match
1672
1674
1673 def markcommitted(self, node):
1675 def markcommitted(self, node):
1674 super(workingctx, self).markcommitted(node)
1676 super(workingctx, self).markcommitted(node)
1675
1677
1676 sparse.aftercommit(self._repo, node)
1678 sparse.aftercommit(self._repo, node)
1677
1679
1678 class committablefilectx(basefilectx):
1680 class committablefilectx(basefilectx):
1679 """A committablefilectx provides common functionality for a file context
1681 """A committablefilectx provides common functionality for a file context
1680 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1682 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1681 def __init__(self, repo, path, filelog=None, ctx=None):
1683 def __init__(self, repo, path, filelog=None, ctx=None):
1682 self._repo = repo
1684 self._repo = repo
1683 self._path = path
1685 self._path = path
1684 self._changeid = None
1686 self._changeid = None
1685 self._filerev = self._filenode = None
1687 self._filerev = self._filenode = None
1686
1688
1687 if filelog is not None:
1689 if filelog is not None:
1688 self._filelog = filelog
1690 self._filelog = filelog
1689 if ctx:
1691 if ctx:
1690 self._changectx = ctx
1692 self._changectx = ctx
1691
1693
1692 def __nonzero__(self):
1694 def __nonzero__(self):
1693 return True
1695 return True
1694
1696
1695 __bool__ = __nonzero__
1697 __bool__ = __nonzero__
1696
1698
1697 def linkrev(self):
1699 def linkrev(self):
1698 # linked to self._changectx no matter if file is modified or not
1700 # linked to self._changectx no matter if file is modified or not
1699 return self.rev()
1701 return self.rev()
1700
1702
1701 def parents(self):
1703 def parents(self):
1702 '''return parent filectxs, following copies if necessary'''
1704 '''return parent filectxs, following copies if necessary'''
1703 def filenode(ctx, path):
1705 def filenode(ctx, path):
1704 return ctx._manifest.get(path, nullid)
1706 return ctx._manifest.get(path, nullid)
1705
1707
1706 path = self._path
1708 path = self._path
1707 fl = self._filelog
1709 fl = self._filelog
1708 pcl = self._changectx._parents
1710 pcl = self._changectx._parents
1709 renamed = self.renamed()
1711 renamed = self.renamed()
1710
1712
1711 if renamed:
1713 if renamed:
1712 pl = [renamed + (None,)]
1714 pl = [renamed + (None,)]
1713 else:
1715 else:
1714 pl = [(path, filenode(pcl[0], path), fl)]
1716 pl = [(path, filenode(pcl[0], path), fl)]
1715
1717
1716 for pc in pcl[1:]:
1718 for pc in pcl[1:]:
1717 pl.append((path, filenode(pc, path), fl))
1719 pl.append((path, filenode(pc, path), fl))
1718
1720
1719 return [self._parentfilectx(p, fileid=n, filelog=l)
1721 return [self._parentfilectx(p, fileid=n, filelog=l)
1720 for p, n, l in pl if n != nullid]
1722 for p, n, l in pl if n != nullid]
1721
1723
1722 def children(self):
1724 def children(self):
1723 return []
1725 return []
1724
1726
1725 class workingfilectx(committablefilectx):
1727 class workingfilectx(committablefilectx):
1726 """A workingfilectx object makes access to data related to a particular
1728 """A workingfilectx object makes access to data related to a particular
1727 file in the working directory convenient."""
1729 file in the working directory convenient."""
1728 def __init__(self, repo, path, filelog=None, workingctx=None):
1730 def __init__(self, repo, path, filelog=None, workingctx=None):
1729 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1731 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1730
1732
1731 @propertycache
1733 @propertycache
1732 def _changectx(self):
1734 def _changectx(self):
1733 return workingctx(self._repo)
1735 return workingctx(self._repo)
1734
1736
1735 def data(self):
1737 def data(self):
1736 return self._repo.wread(self._path)
1738 return self._repo.wread(self._path)
1737 def renamed(self):
1739 def renamed(self):
1738 rp = self._repo.dirstate.copied(self._path)
1740 rp = self._repo.dirstate.copied(self._path)
1739 if not rp:
1741 if not rp:
1740 return None
1742 return None
1741 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1743 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1742
1744
1743 def size(self):
1745 def size(self):
1744 return self._repo.wvfs.lstat(self._path).st_size
1746 return self._repo.wvfs.lstat(self._path).st_size
1745 def date(self):
1747 def date(self):
1746 t, tz = self._changectx.date()
1748 t, tz = self._changectx.date()
1747 try:
1749 try:
1748 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1750 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1749 except OSError as err:
1751 except OSError as err:
1750 if err.errno != errno.ENOENT:
1752 if err.errno != errno.ENOENT:
1751 raise
1753 raise
1752 return (t, tz)
1754 return (t, tz)
1753
1755
1754 def exists(self):
1756 def exists(self):
1755 return self._repo.wvfs.exists(self._path)
1757 return self._repo.wvfs.exists(self._path)
1756
1758
1757 def lexists(self):
1759 def lexists(self):
1758 return self._repo.wvfs.lexists(self._path)
1760 return self._repo.wvfs.lexists(self._path)
1759
1761
1760 def audit(self):
1762 def audit(self):
1761 return self._repo.wvfs.audit(self._path)
1763 return self._repo.wvfs.audit(self._path)
1762
1764
1763 def cmp(self, fctx):
1765 def cmp(self, fctx):
1764 """compare with other file context
1766 """compare with other file context
1765
1767
1766 returns True if different than fctx.
1768 returns True if different than fctx.
1767 """
1769 """
1768 # fctx should be a filectx (not a workingfilectx)
1770 # fctx should be a filectx (not a workingfilectx)
1769 # invert comparison to reuse the same code path
1771 # invert comparison to reuse the same code path
1770 return fctx.cmp(self)
1772 return fctx.cmp(self)
1771
1773
1772 def remove(self, ignoremissing=False):
1774 def remove(self, ignoremissing=False):
1773 """wraps unlink for a repo's working directory"""
1775 """wraps unlink for a repo's working directory"""
1774 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1776 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1775
1777
1776 def write(self, data, flags, backgroundclose=False, **kwargs):
1778 def write(self, data, flags, backgroundclose=False, **kwargs):
1777 """wraps repo.wwrite"""
1779 """wraps repo.wwrite"""
1778 self._repo.wwrite(self._path, data, flags,
1780 self._repo.wwrite(self._path, data, flags,
1779 backgroundclose=backgroundclose,
1781 backgroundclose=backgroundclose,
1780 **kwargs)
1782 **kwargs)
1781
1783
1782 def markcopied(self, src):
1784 def markcopied(self, src):
1783 """marks this file a copy of `src`"""
1785 """marks this file a copy of `src`"""
1784 if self._repo.dirstate[self._path] in "nma":
1786 if self._repo.dirstate[self._path] in "nma":
1785 self._repo.dirstate.copy(src, self._path)
1787 self._repo.dirstate.copy(src, self._path)
1786
1788
1787 def clearunknown(self):
1789 def clearunknown(self):
1788 """Removes conflicting items in the working directory so that
1790 """Removes conflicting items in the working directory so that
1789 ``write()`` can be called successfully.
1791 ``write()`` can be called successfully.
1790 """
1792 """
1791 wvfs = self._repo.wvfs
1793 wvfs = self._repo.wvfs
1792 f = self._path
1794 f = self._path
1793 wvfs.audit(f)
1795 wvfs.audit(f)
1794 if wvfs.isdir(f) and not wvfs.islink(f):
1796 if wvfs.isdir(f) and not wvfs.islink(f):
1795 wvfs.rmtree(f, forcibly=True)
1797 wvfs.rmtree(f, forcibly=True)
1796 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1798 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1797 for p in reversed(list(util.finddirs(f))):
1799 for p in reversed(list(util.finddirs(f))):
1798 if wvfs.isfileorlink(p):
1800 if wvfs.isfileorlink(p):
1799 wvfs.unlink(p)
1801 wvfs.unlink(p)
1800 break
1802 break
1801
1803
1802 def setflags(self, l, x):
1804 def setflags(self, l, x):
1803 self._repo.wvfs.setflags(self._path, l, x)
1805 self._repo.wvfs.setflags(self._path, l, x)
1804
1806
1805 class overlayworkingctx(committablectx):
1807 class overlayworkingctx(committablectx):
1806 """Wraps another mutable context with a write-back cache that can be
1808 """Wraps another mutable context with a write-back cache that can be
1807 converted into a commit context.
1809 converted into a commit context.
1808
1810
1809 self._cache[path] maps to a dict with keys: {
1811 self._cache[path] maps to a dict with keys: {
1810 'exists': bool?
1812 'exists': bool?
1811 'date': date?
1813 'date': date?
1812 'data': str?
1814 'data': str?
1813 'flags': str?
1815 'flags': str?
1814 'copied': str? (path or None)
1816 'copied': str? (path or None)
1815 }
1817 }
1816 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1818 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1817 is `False`, the file was deleted.
1819 is `False`, the file was deleted.
1818 """
1820 """
1819
1821
1820 def __init__(self, repo):
1822 def __init__(self, repo):
1821 super(overlayworkingctx, self).__init__(repo)
1823 super(overlayworkingctx, self).__init__(repo)
1822 self.clean()
1824 self.clean()
1823
1825
1824 def setbase(self, wrappedctx):
1826 def setbase(self, wrappedctx):
1825 self._wrappedctx = wrappedctx
1827 self._wrappedctx = wrappedctx
1826 self._parents = [wrappedctx]
1828 self._parents = [wrappedctx]
1827 # Drop old manifest cache as it is now out of date.
1829 # Drop old manifest cache as it is now out of date.
1828 # This is necessary when, e.g., rebasing several nodes with one
1830 # This is necessary when, e.g., rebasing several nodes with one
1829 # ``overlayworkingctx`` (e.g. with --collapse).
1831 # ``overlayworkingctx`` (e.g. with --collapse).
1830 util.clearcachedproperty(self, '_manifest')
1832 util.clearcachedproperty(self, '_manifest')
1831
1833
1832 def data(self, path):
1834 def data(self, path):
1833 if self.isdirty(path):
1835 if self.isdirty(path):
1834 if self._cache[path]['exists']:
1836 if self._cache[path]['exists']:
1835 if self._cache[path]['data']:
1837 if self._cache[path]['data']:
1836 return self._cache[path]['data']
1838 return self._cache[path]['data']
1837 else:
1839 else:
1838 # Must fallback here, too, because we only set flags.
1840 # Must fallback here, too, because we only set flags.
1839 return self._wrappedctx[path].data()
1841 return self._wrappedctx[path].data()
1840 else:
1842 else:
1841 raise error.ProgrammingError("No such file or directory: %s" %
1843 raise error.ProgrammingError("No such file or directory: %s" %
1842 path)
1844 path)
1843 else:
1845 else:
1844 return self._wrappedctx[path].data()
1846 return self._wrappedctx[path].data()
1845
1847
1846 @propertycache
1848 @propertycache
1847 def _manifest(self):
1849 def _manifest(self):
1848 parents = self.parents()
1850 parents = self.parents()
1849 man = parents[0].manifest().copy()
1851 man = parents[0].manifest().copy()
1850
1852
1851 flag = self._flagfunc
1853 flag = self._flagfunc
1852 for path in self.added():
1854 for path in self.added():
1853 man[path] = addednodeid
1855 man[path] = addednodeid
1854 man.setflag(path, flag(path))
1856 man.setflag(path, flag(path))
1855 for path in self.modified():
1857 for path in self.modified():
1856 man[path] = modifiednodeid
1858 man[path] = modifiednodeid
1857 man.setflag(path, flag(path))
1859 man.setflag(path, flag(path))
1858 for path in self.removed():
1860 for path in self.removed():
1859 del man[path]
1861 del man[path]
1860 return man
1862 return man
1861
1863
1862 @propertycache
1864 @propertycache
1863 def _flagfunc(self):
1865 def _flagfunc(self):
1864 def f(path):
1866 def f(path):
1865 return self._cache[path]['flags']
1867 return self._cache[path]['flags']
1866 return f
1868 return f
1867
1869
1868 def files(self):
1870 def files(self):
1869 return sorted(self.added() + self.modified() + self.removed())
1871 return sorted(self.added() + self.modified() + self.removed())
1870
1872
1871 def modified(self):
1873 def modified(self):
1872 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1874 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1873 self._existsinparent(f)]
1875 self._existsinparent(f)]
1874
1876
1875 def added(self):
1877 def added(self):
1876 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1878 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1877 not self._existsinparent(f)]
1879 not self._existsinparent(f)]
1878
1880
1879 def removed(self):
1881 def removed(self):
1880 return [f for f in self._cache.keys() if
1882 return [f for f in self._cache.keys() if
1881 not self._cache[f]['exists'] and self._existsinparent(f)]
1883 not self._cache[f]['exists'] and self._existsinparent(f)]
1882
1884
1883 def isinmemory(self):
1885 def isinmemory(self):
1884 return True
1886 return True
1885
1887
1886 def filedate(self, path):
1888 def filedate(self, path):
1887 if self.isdirty(path):
1889 if self.isdirty(path):
1888 return self._cache[path]['date']
1890 return self._cache[path]['date']
1889 else:
1891 else:
1890 return self._wrappedctx[path].date()
1892 return self._wrappedctx[path].date()
1891
1893
1892 def markcopied(self, path, origin):
1894 def markcopied(self, path, origin):
1893 if self.isdirty(path):
1895 if self.isdirty(path):
1894 self._cache[path]['copied'] = origin
1896 self._cache[path]['copied'] = origin
1895 else:
1897 else:
1896 raise error.ProgrammingError('markcopied() called on clean context')
1898 raise error.ProgrammingError('markcopied() called on clean context')
1897
1899
1898 def copydata(self, path):
1900 def copydata(self, path):
1899 if self.isdirty(path):
1901 if self.isdirty(path):
1900 return self._cache[path]['copied']
1902 return self._cache[path]['copied']
1901 else:
1903 else:
1902 raise error.ProgrammingError('copydata() called on clean context')
1904 raise error.ProgrammingError('copydata() called on clean context')
1903
1905
1904 def flags(self, path):
1906 def flags(self, path):
1905 if self.isdirty(path):
1907 if self.isdirty(path):
1906 if self._cache[path]['exists']:
1908 if self._cache[path]['exists']:
1907 return self._cache[path]['flags']
1909 return self._cache[path]['flags']
1908 else:
1910 else:
1909 raise error.ProgrammingError("No such file or directory: %s" %
1911 raise error.ProgrammingError("No such file or directory: %s" %
1910 self._path)
1912 self._path)
1911 else:
1913 else:
1912 return self._wrappedctx[path].flags()
1914 return self._wrappedctx[path].flags()
1913
1915
1914 def _existsinparent(self, path):
1916 def _existsinparent(self, path):
1915 try:
1917 try:
1916 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1918 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1917 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1919 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1918 # with an ``exists()`` function.
1920 # with an ``exists()`` function.
1919 self._wrappedctx[path]
1921 self._wrappedctx[path]
1920 return True
1922 return True
1921 except error.ManifestLookupError:
1923 except error.ManifestLookupError:
1922 return False
1924 return False
1923
1925
1924 def _auditconflicts(self, path):
1926 def _auditconflicts(self, path):
1925 """Replicates conflict checks done by wvfs.write().
1927 """Replicates conflict checks done by wvfs.write().
1926
1928
1927 Since we never write to the filesystem and never call `applyupdates` in
1929 Since we never write to the filesystem and never call `applyupdates` in
1928 IMM, we'll never check that a path is actually writable -- e.g., because
1930 IMM, we'll never check that a path is actually writable -- e.g., because
1929 it adds `a/foo`, but `a` is actually a file in the other commit.
1931 it adds `a/foo`, but `a` is actually a file in the other commit.
1930 """
1932 """
1931 def fail(path, component):
1933 def fail(path, component):
1932 # p1() is the base and we're receiving "writes" for p2()'s
1934 # p1() is the base and we're receiving "writes" for p2()'s
1933 # files.
1935 # files.
1934 if 'l' in self.p1()[component].flags():
1936 if 'l' in self.p1()[component].flags():
1935 raise error.Abort("error: %s conflicts with symlink %s "
1937 raise error.Abort("error: %s conflicts with symlink %s "
1936 "in %s." % (path, component,
1938 "in %s." % (path, component,
1937 self.p1().rev()))
1939 self.p1().rev()))
1938 else:
1940 else:
1939 raise error.Abort("error: '%s' conflicts with file '%s' in "
1941 raise error.Abort("error: '%s' conflicts with file '%s' in "
1940 "%s." % (path, component,
1942 "%s." % (path, component,
1941 self.p1().rev()))
1943 self.p1().rev()))
1942
1944
1943 # Test that each new directory to be created to write this path from p2
1945 # Test that each new directory to be created to write this path from p2
1944 # is not a file in p1.
1946 # is not a file in p1.
1945 components = path.split('/')
1947 components = path.split('/')
1946 for i in xrange(len(components)):
1948 for i in xrange(len(components)):
1947 component = "/".join(components[0:i])
1949 component = "/".join(components[0:i])
1948 if component in self.p1():
1950 if component in self.p1():
1949 fail(path, component)
1951 fail(path, component)
1950
1952
1951 # Test the other direction -- that this path from p2 isn't a directory
1953 # Test the other direction -- that this path from p2 isn't a directory
1952 # in p1 (test that p1 doesn't any paths matching `path/*`).
1954 # in p1 (test that p1 doesn't any paths matching `path/*`).
1953 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1955 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1954 matches = self.p1().manifest().matches(match)
1956 matches = self.p1().manifest().matches(match)
1955 if len(matches) > 0:
1957 if len(matches) > 0:
1956 if len(matches) == 1 and matches.keys()[0] == path:
1958 if len(matches) == 1 and matches.keys()[0] == path:
1957 return
1959 return
1958 raise error.Abort("error: file '%s' cannot be written because "
1960 raise error.Abort("error: file '%s' cannot be written because "
1959 " '%s/' is a folder in %s (containing %d "
1961 " '%s/' is a folder in %s (containing %d "
1960 "entries: %s)"
1962 "entries: %s)"
1961 % (path, path, self.p1(), len(matches),
1963 % (path, path, self.p1(), len(matches),
1962 ', '.join(matches.keys())))
1964 ', '.join(matches.keys())))
1963
1965
1964 def write(self, path, data, flags='', **kwargs):
1966 def write(self, path, data, flags='', **kwargs):
1965 if data is None:
1967 if data is None:
1966 raise error.ProgrammingError("data must be non-None")
1968 raise error.ProgrammingError("data must be non-None")
1967 self._auditconflicts(path)
1969 self._auditconflicts(path)
1968 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1970 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1969 flags=flags)
1971 flags=flags)
1970
1972
1971 def setflags(self, path, l, x):
1973 def setflags(self, path, l, x):
1972 self._markdirty(path, exists=True, date=dateutil.makedate(),
1974 self._markdirty(path, exists=True, date=dateutil.makedate(),
1973 flags=(l and 'l' or '') + (x and 'x' or ''))
1975 flags=(l and 'l' or '') + (x and 'x' or ''))
1974
1976
1975 def remove(self, path):
1977 def remove(self, path):
1976 self._markdirty(path, exists=False)
1978 self._markdirty(path, exists=False)
1977
1979
1978 def exists(self, path):
1980 def exists(self, path):
1979 """exists behaves like `lexists`, but needs to follow symlinks and
1981 """exists behaves like `lexists`, but needs to follow symlinks and
1980 return False if they are broken.
1982 return False if they are broken.
1981 """
1983 """
1982 if self.isdirty(path):
1984 if self.isdirty(path):
1983 # If this path exists and is a symlink, "follow" it by calling
1985 # If this path exists and is a symlink, "follow" it by calling
1984 # exists on the destination path.
1986 # exists on the destination path.
1985 if (self._cache[path]['exists'] and
1987 if (self._cache[path]['exists'] and
1986 'l' in self._cache[path]['flags']):
1988 'l' in self._cache[path]['flags']):
1987 return self.exists(self._cache[path]['data'].strip())
1989 return self.exists(self._cache[path]['data'].strip())
1988 else:
1990 else:
1989 return self._cache[path]['exists']
1991 return self._cache[path]['exists']
1990
1992
1991 return self._existsinparent(path)
1993 return self._existsinparent(path)
1992
1994
1993 def lexists(self, path):
1995 def lexists(self, path):
1994 """lexists returns True if the path exists"""
1996 """lexists returns True if the path exists"""
1995 if self.isdirty(path):
1997 if self.isdirty(path):
1996 return self._cache[path]['exists']
1998 return self._cache[path]['exists']
1997
1999
1998 return self._existsinparent(path)
2000 return self._existsinparent(path)
1999
2001
2000 def size(self, path):
2002 def size(self, path):
2001 if self.isdirty(path):
2003 if self.isdirty(path):
2002 if self._cache[path]['exists']:
2004 if self._cache[path]['exists']:
2003 return len(self._cache[path]['data'])
2005 return len(self._cache[path]['data'])
2004 else:
2006 else:
2005 raise error.ProgrammingError("No such file or directory: %s" %
2007 raise error.ProgrammingError("No such file or directory: %s" %
2006 self._path)
2008 self._path)
2007 return self._wrappedctx[path].size()
2009 return self._wrappedctx[path].size()
2008
2010
    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        # Every cached path (modified, added, or deleted) is reported to
        # memctx; ``getfile`` below distinguishes the cases lazily.
        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
2043
2045
2044 def isdirty(self, path):
2046 def isdirty(self, path):
2045 return path in self._cache
2047 return path in self._cache
2046
2048
2047 def isempty(self):
2049 def isempty(self):
2048 # We need to discard any keys that are actually clean before the empty
2050 # We need to discard any keys that are actually clean before the empty
2049 # commit check.
2051 # commit check.
2050 self._compact()
2052 self._compact()
2051 return len(self._cache) == 0
2053 return len(self._cache) == 0
2052
2054
2053 def clean(self):
2055 def clean(self):
2054 self._cache = {}
2056 self._cache = {}
2055
2057
2056 def _compact(self):
2058 def _compact(self):
2057 """Removes keys from the cache that are actually clean, by comparing
2059 """Removes keys from the cache that are actually clean, by comparing
2058 them with the underlying context.
2060 them with the underlying context.
2059
2061
2060 This can occur during the merge process, e.g. by passing --tool :local
2062 This can occur during the merge process, e.g. by passing --tool :local
2061 to resolve a conflict.
2063 to resolve a conflict.
2062 """
2064 """
2063 keys = []
2065 keys = []
2064 for path in self._cache.keys():
2066 for path in self._cache.keys():
2065 cache = self._cache[path]
2067 cache = self._cache[path]
2066 try:
2068 try:
2067 underlying = self._wrappedctx[path]
2069 underlying = self._wrappedctx[path]
2068 if (underlying.data() == cache['data'] and
2070 if (underlying.data() == cache['data'] and
2069 underlying.flags() == cache['flags']):
2071 underlying.flags() == cache['flags']):
2070 keys.append(path)
2072 keys.append(path)
2071 except error.ManifestLookupError:
2073 except error.ManifestLookupError:
2072 # Path not in the underlying manifest (created).
2074 # Path not in the underlying manifest (created).
2073 continue
2075 continue
2074
2076
2075 for path in keys:
2077 for path in keys:
2076 del self._cache[path]
2078 del self._cache[path]
2077 return keys
2079 return keys
2078
2080
2079 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2081 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2080 self._cache[path] = {
2082 self._cache[path] = {
2081 'exists': exists,
2083 'exists': exists,
2082 'data': data,
2084 'data': data,
2083 'date': date,
2085 'date': date,
2084 'flags': flags,
2086 'flags': flags,
2085 'copied': None,
2087 'copied': None,
2086 }
2088 }
2087
2089
    def filectx(self, path, filelog=None):
        """Return an overlayworkingfilectx for ``path``, a per-file view
        backed by this context's in-memory cache."""
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2091
2093
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; all file state lives
        # there, keyed by ``path`` — this object is a thin per-path view.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when contents differ (i.e. "changed"), mirroring filectx.cmp.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # Delegates to lexists(); the parent context handles symlinks.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem paths are touched in-memory, so nothing to audit.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but
        # meaningless for an in-memory write, so it is not forwarded.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can conflict with an in-memory file.
        pass
2150
2152
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE(review): ``super(workingctx, ...)`` (not workingcommitctx)
        # appears deliberate — it skips workingctx.__init__ so the
        # precomputed ``changes`` status is installed directly; confirm
        # against committablectx.__init__ before touching.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything tracked but untouched by this commit is "clean".
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2186
2188
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            # First request for this path: delegate, then remember the
            # result (including None for deletions).
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
2202
2204
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            # renamed() returns (path, filenode); memfilectx wants the path.
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2221
2223
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is not None:
            islink, isexec = mode
            return memfilectx(repo, memctx, path, data, islink=islink,
                              isexec=isexec, copied=copied)
        # Deleted by the patch: memctx registers the removal via None.
        return None

    return getfilectx
2236
2238
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Missing parents are normalized to nullid so _parents always has
        # exactly two entries.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # Added files have no filelog parents yet.
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: the file exists (modified).
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed.
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2359
2361
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the boolean flags in the manifest's flag-string form:
        # 'l' for symlink, 'x' for executable.
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # Copy rev is recalculated at commit time, so nullid suffices.
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        # In-memory file: a "write" just replaces the buffered content.
        self._data = data
2392
2394
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # No override: the ctx trivially matches for the reuse test.
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # Lazily evaluated: either the override or originalfctx.data.
        return self._datafunc()
2463
2465
2464 class metadataonlyctx(committablectx):
2466 class metadataonlyctx(committablectx):
2465 """Like memctx but it's reusing the manifest of different commit.
2467 """Like memctx but it's reusing the manifest of different commit.
2466 Intended to be used by lightweight operations that are creating
2468 Intended to be used by lightweight operations that are creating
2467 metadata-only changes.
2469 metadata-only changes.
2468
2470
2469 Revision information is supplied at initialization time. 'repo' is the
2471 Revision information is supplied at initialization time. 'repo' is the
2470 current localrepo, 'ctx' is original revision which manifest we're reuisng
2472 current localrepo, 'ctx' is original revision which manifest we're reuisng
2471 'parents' is a sequence of two parent revisions identifiers (pass None for
2473 'parents' is a sequence of two parent revisions identifiers (pass None for
2472 every missing parent), 'text' is the commit.
2474 every missing parent), 'text' is the commit.
2473
2475
2474 user receives the committer name and defaults to current repository
2476 user receives the committer name and defaults to current repository
2475 username, date is the commit date in any format supported by
2477 username, date is the commit date in any format supported by
2476 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2478 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2477 metadata or is left empty.
2479 metadata or is left empty.
2478 """
2480 """
2479 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2481 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2480 date=None, extra=None, editor=False):
2482 date=None, extra=None, editor=False):
2481 if text is None:
2483 if text is None:
2482 text = originalctx.description()
2484 text = originalctx.description()
2483 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2485 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2484 self._rev = None
2486 self._rev = None
2485 self._node = None
2487 self._node = None
2486 self._originalctx = originalctx
2488 self._originalctx = originalctx
2487 self._manifestnode = originalctx.manifestnode()
2489 self._manifestnode = originalctx.manifestnode()
2488 if parents is None:
2490 if parents is None:
2489 parents = originalctx.parents()
2491 parents = originalctx.parents()
2490 else:
2492 else:
2491 parents = [repo[p] for p in parents if p is not None]
2493 parents = [repo[p] for p in parents if p is not None]
2492 parents = parents[:]
2494 parents = parents[:]
2493 while len(parents) < 2:
2495 while len(parents) < 2:
2494 parents.append(repo[nullid])
2496 parents.append(repo[nullid])
2495 p1, p2 = self._parents = parents
2497 p1, p2 = self._parents = parents
2496
2498
2497 # sanity check to ensure that the reused manifest parents are
2499 # sanity check to ensure that the reused manifest parents are
2498 # manifests of our commit parents
2500 # manifests of our commit parents
2499 mp1, mp2 = self.manifestctx().parents
2501 mp1, mp2 = self.manifestctx().parents
2500 if p1 != nullid and p1.manifestnode() != mp1:
2502 if p1 != nullid and p1.manifestnode() != mp1:
2501 raise RuntimeError('can\'t reuse the manifest: '
2503 raise RuntimeError('can\'t reuse the manifest: '
2502 'its p1 doesn\'t match the new ctx p1')
2504 'its p1 doesn\'t match the new ctx p1')
2503 if p2 != nullid and p2.manifestnode() != mp2:
2505 if p2 != nullid and p2.manifestnode() != mp2:
2504 raise RuntimeError('can\'t reuse the manifest: '
2506 raise RuntimeError('can\'t reuse the manifest: '
2505 'its p2 doesn\'t match the new ctx p2')
2507 'its p2 doesn\'t match the new ctx p2')
2506
2508
2507 self._files = originalctx.files()
2509 self._files = originalctx.files()
2508 self.substate = {}
2510 self.substate = {}
2509
2511
2510 if editor:
2512 if editor:
2511 self._text = editor(self._repo, self, [])
2513 self._text = editor(self._repo, self, [])
2512 self._repo.savecommitmessage(self._text)
2514 self._repo.savecommitmessage(self._text)
2513
2515
2514 def manifestnode(self):
2516 def manifestnode(self):
2515 return self._manifestnode
2517 return self._manifestnode
2516
2518
2517 @property
2519 @property
2518 def _manifestctx(self):
2520 def _manifestctx(self):
2519 return self._repo.manifestlog[self._manifestnode]
2521 return self._repo.manifestlog[self._manifestnode]
2520
2522
2521 def filectx(self, path, filelog=None):
2523 def filectx(self, path, filelog=None):
2522 return self._originalctx.filectx(path, filelog=filelog)
2524 return self._originalctx.filectx(path, filelog=filelog)
2523
2525
2524 def commit(self):
2526 def commit(self):
2525 """commit context to the repo"""
2527 """commit context to the repo"""
2526 return self._repo.commitctx(self)
2528 return self._repo.commitctx(self)
2527
2529
2528 @property
2530 @property
2529 def _manifest(self):
2531 def _manifest(self):
2530 return self._originalctx.manifest()
2532 return self._originalctx.manifest()
2531
2533
2532 @propertycache
2534 @propertycache
2533 def _status(self):
2535 def _status(self):
2534 """Calculate exact status from ``files`` specified in the ``origctx``
2536 """Calculate exact status from ``files`` specified in the ``origctx``
2535 and parents manifests.
2537 and parents manifests.
2536 """
2538 """
2537 man1 = self.p1().manifest()
2539 man1 = self.p1().manifest()
2538 p2 = self._parents[1]
2540 p2 = self._parents[1]
2539 # "1 < len(self._parents)" can't be used for checking
2541 # "1 < len(self._parents)" can't be used for checking
2540 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2542 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2541 # explicitly initialized by the list, of which length is 2.
2543 # explicitly initialized by the list, of which length is 2.
2542 if p2.node() != nullid:
2544 if p2.node() != nullid:
2543 man2 = p2.manifest()
2545 man2 = p2.manifest()
2544 managing = lambda f: f in man1 or f in man2
2546 managing = lambda f: f in man1 or f in man2
2545 else:
2547 else:
2546 managing = lambda f: f in man1
2548 managing = lambda f: f in man1
2547
2549
2548 modified, added, removed = [], [], []
2550 modified, added, removed = [], [], []
2549 for f in self._files:
2551 for f in self._files:
2550 if not managing(f):
2552 if not managing(f):
2551 added.append(f)
2553 added.append(f)
2552 elif f in self:
2554 elif f in self:
2553 modified.append(f)
2555 modified.append(f)
2554 else:
2556 else:
2555 removed.append(f)
2557 removed.append(f)
2556
2558
2557 return scmutil.status(modified, added, removed, [], [], [], [])
2559 return scmutil.status(modified, added, removed, [], [], [], [])
2558
2560
2559 class arbitraryfilectx(object):
2561 class arbitraryfilectx(object):
2560 """Allows you to use filectx-like functions on a file in an arbitrary
2562 """Allows you to use filectx-like functions on a file in an arbitrary
2561 location on disk, possibly not in the working directory.
2563 location on disk, possibly not in the working directory.
2562 """
2564 """
2563 def __init__(self, path, repo=None):
2565 def __init__(self, path, repo=None):
2564 # Repo is optional because contrib/simplemerge uses this class.
2566 # Repo is optional because contrib/simplemerge uses this class.
2565 self._repo = repo
2567 self._repo = repo
2566 self._path = path
2568 self._path = path
2567
2569
2568 def cmp(self, fctx):
2570 def cmp(self, fctx):
2569 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2571 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2570 # path if either side is a symlink.
2572 # path if either side is a symlink.
2571 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2573 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2572 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2574 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2573 # Add a fast-path for merge if both sides are disk-backed.
2575 # Add a fast-path for merge if both sides are disk-backed.
2574 # Note that filecmp uses the opposite return values (True if same)
2576 # Note that filecmp uses the opposite return values (True if same)
2575 # from our cmp functions (True if different).
2577 # from our cmp functions (True if different).
2576 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2578 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2577 return self.data() != fctx.data()
2579 return self.data() != fctx.data()
2578
2580
2579 def path(self):
2581 def path(self):
2580 return self._path
2582 return self._path
2581
2583
2582 def flags(self):
2584 def flags(self):
2583 return ''
2585 return ''
2584
2586
2585 def data(self):
2587 def data(self):
2586 return util.readfile(self._path)
2588 return util.readfile(self._path)
2587
2589
2588 def decodeddata(self):
2590 def decodeddata(self):
2589 with open(self._path, "rb") as f:
2591 with open(self._path, "rb") as f:
2590 return f.read()
2592 return f.read()
2591
2593
2592 def remove(self):
2594 def remove(self):
2593 util.unlink(self._path)
2595 util.unlink(self._path)
2594
2596
2595 def write(self, data, flags, **kwargs):
2597 def write(self, data, flags, **kwargs):
2596 assert not flags
2598 assert not flags
2597 with open(self._path, "w") as f:
2599 with open(self._path, "w") as f:
2598 f.write(data)
2600 f.write(data)
@@ -1,2381 +1,2380
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from .thirdparty.zope import (
24 from .thirdparty.zope import (
25 interface as zi,
25 interface as zi,
26 )
26 )
27 from . import (
27 from . import (
28 bookmarks,
28 bookmarks,
29 branchmap,
29 branchmap,
30 bundle2,
30 bundle2,
31 changegroup,
31 changegroup,
32 changelog,
32 changelog,
33 color,
33 color,
34 context,
34 context,
35 dirstate,
35 dirstate,
36 dirstateguard,
36 dirstateguard,
37 discovery,
37 discovery,
38 encoding,
38 encoding,
39 error,
39 error,
40 exchange,
40 exchange,
41 extensions,
41 extensions,
42 filelog,
42 filelog,
43 hook,
43 hook,
44 lock as lockmod,
44 lock as lockmod,
45 manifest,
45 manifest,
46 match as matchmod,
46 match as matchmod,
47 merge as mergemod,
47 merge as mergemod,
48 mergeutil,
48 mergeutil,
49 namespaces,
49 namespaces,
50 narrowspec,
50 narrowspec,
51 obsolete,
51 obsolete,
52 pathutil,
52 pathutil,
53 phases,
53 phases,
54 pushkey,
54 pushkey,
55 pycompat,
55 pycompat,
56 repository,
56 repository,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store,
62 store,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 )
69 )
70 from .utils import (
70 from .utils import (
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 release = lockmod.release
75 release = lockmod.release
76 urlerr = util.urlerr
76 urlerr = util.urlerr
77 urlreq = util.urlreq
77 urlreq = util.urlreq
78
78
79 # set of (path, vfs-location) tuples. vfs-location is:
79 # set of (path, vfs-location) tuples. vfs-location is:
80 # - 'plain for vfs relative paths
80 # - 'plain for vfs relative paths
81 # - '' for svfs relative paths
81 # - '' for svfs relative paths
82 _cachedfiles = set()
82 _cachedfiles = set()
83
83
84 class _basefilecache(scmutil.filecache):
84 class _basefilecache(scmutil.filecache):
85 """All filecache usage on repo are done for logic that should be unfiltered
85 """All filecache usage on repo are done for logic that should be unfiltered
86 """
86 """
87 def __get__(self, repo, type=None):
87 def __get__(self, repo, type=None):
88 if repo is None:
88 if repo is None:
89 return self
89 return self
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 def __set__(self, repo, value):
91 def __set__(self, repo, value):
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 def __delete__(self, repo):
93 def __delete__(self, repo):
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
95
95
96 class repofilecache(_basefilecache):
96 class repofilecache(_basefilecache):
97 """filecache for files in .hg but outside of .hg/store"""
97 """filecache for files in .hg but outside of .hg/store"""
98 def __init__(self, *paths):
98 def __init__(self, *paths):
99 super(repofilecache, self).__init__(*paths)
99 super(repofilecache, self).__init__(*paths)
100 for path in paths:
100 for path in paths:
101 _cachedfiles.add((path, 'plain'))
101 _cachedfiles.add((path, 'plain'))
102
102
103 def join(self, obj, fname):
103 def join(self, obj, fname):
104 return obj.vfs.join(fname)
104 return obj.vfs.join(fname)
105
105
106 class storecache(_basefilecache):
106 class storecache(_basefilecache):
107 """filecache for files in the store"""
107 """filecache for files in the store"""
108 def __init__(self, *paths):
108 def __init__(self, *paths):
109 super(storecache, self).__init__(*paths)
109 super(storecache, self).__init__(*paths)
110 for path in paths:
110 for path in paths:
111 _cachedfiles.add((path, ''))
111 _cachedfiles.add((path, ''))
112
112
113 def join(self, obj, fname):
113 def join(self, obj, fname):
114 return obj.sjoin(fname)
114 return obj.sjoin(fname)
115
115
116 def isfilecached(repo, name):
116 def isfilecached(repo, name):
117 """check if a repo has already cached "name" filecache-ed property
117 """check if a repo has already cached "name" filecache-ed property
118
118
119 This returns (cachedobj-or-None, iscached) tuple.
119 This returns (cachedobj-or-None, iscached) tuple.
120 """
120 """
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 if not cacheentry:
122 if not cacheentry:
123 return None, False
123 return None, False
124 return cacheentry.obj, True
124 return cacheentry.obj, True
125
125
126 class unfilteredpropertycache(util.propertycache):
126 class unfilteredpropertycache(util.propertycache):
127 """propertycache that apply to unfiltered repo only"""
127 """propertycache that apply to unfiltered repo only"""
128
128
129 def __get__(self, repo, type=None):
129 def __get__(self, repo, type=None):
130 unfi = repo.unfiltered()
130 unfi = repo.unfiltered()
131 if unfi is repo:
131 if unfi is repo:
132 return super(unfilteredpropertycache, self).__get__(unfi)
132 return super(unfilteredpropertycache, self).__get__(unfi)
133 return getattr(unfi, self.name)
133 return getattr(unfi, self.name)
134
134
135 class filteredpropertycache(util.propertycache):
135 class filteredpropertycache(util.propertycache):
136 """propertycache that must take filtering in account"""
136 """propertycache that must take filtering in account"""
137
137
138 def cachevalue(self, obj, value):
138 def cachevalue(self, obj, value):
139 object.__setattr__(obj, self.name, value)
139 object.__setattr__(obj, self.name, value)
140
140
141
141
142 def hasunfilteredcache(repo, name):
142 def hasunfilteredcache(repo, name):
143 """check if a repo has an unfilteredpropertycache value for <name>"""
143 """check if a repo has an unfilteredpropertycache value for <name>"""
144 return name in vars(repo.unfiltered())
144 return name in vars(repo.unfiltered())
145
145
146 def unfilteredmethod(orig):
146 def unfilteredmethod(orig):
147 """decorate method that always need to be run on unfiltered version"""
147 """decorate method that always need to be run on unfiltered version"""
148 def wrapper(repo, *args, **kwargs):
148 def wrapper(repo, *args, **kwargs):
149 return orig(repo.unfiltered(), *args, **kwargs)
149 return orig(repo.unfiltered(), *args, **kwargs)
150 return wrapper
150 return wrapper
151
151
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 'unbundle'}
153 'unbundle'}
154 legacycaps = moderncaps.union({'changegroupsubset'})
154 legacycaps = moderncaps.union({'changegroupsubset'})
155
155
156 @zi.implementer(repository.ipeercommandexecutor)
156 @zi.implementer(repository.ipeercommandexecutor)
157 class localcommandexecutor(object):
157 class localcommandexecutor(object):
158 def __init__(self, peer):
158 def __init__(self, peer):
159 self._peer = peer
159 self._peer = peer
160 self._sent = False
160 self._sent = False
161 self._closed = False
161 self._closed = False
162
162
163 def __enter__(self):
163 def __enter__(self):
164 return self
164 return self
165
165
166 def __exit__(self, exctype, excvalue, exctb):
166 def __exit__(self, exctype, excvalue, exctb):
167 self.close()
167 self.close()
168
168
169 def callcommand(self, command, args):
169 def callcommand(self, command, args):
170 if self._sent:
170 if self._sent:
171 raise error.ProgrammingError('callcommand() cannot be used after '
171 raise error.ProgrammingError('callcommand() cannot be used after '
172 'sendcommands()')
172 'sendcommands()')
173
173
174 if self._closed:
174 if self._closed:
175 raise error.ProgrammingError('callcommand() cannot be used after '
175 raise error.ProgrammingError('callcommand() cannot be used after '
176 'close()')
176 'close()')
177
177
178 # We don't need to support anything fancy. Just call the named
178 # We don't need to support anything fancy. Just call the named
179 # method on the peer and return a resolved future.
179 # method on the peer and return a resolved future.
180 fn = getattr(self._peer, pycompat.sysstr(command))
180 fn = getattr(self._peer, pycompat.sysstr(command))
181
181
182 f = pycompat.futures.Future()
182 f = pycompat.futures.Future()
183
183
184 try:
184 try:
185 result = fn(**pycompat.strkwargs(args))
185 result = fn(**pycompat.strkwargs(args))
186 except Exception:
186 except Exception:
187 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
187 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
188 else:
188 else:
189 f.set_result(result)
189 f.set_result(result)
190
190
191 return f
191 return f
192
192
193 def sendcommands(self):
193 def sendcommands(self):
194 self._sent = True
194 self._sent = True
195
195
196 def close(self):
196 def close(self):
197 self._closed = True
197 self._closed = True
198
198
199 @zi.implementer(repository.ipeercommands)
199 @zi.implementer(repository.ipeercommands)
200 class localpeer(repository.peer):
200 class localpeer(repository.peer):
201 '''peer for a local repo; reflects only the most recent API'''
201 '''peer for a local repo; reflects only the most recent API'''
202
202
203 def __init__(self, repo, caps=None):
203 def __init__(self, repo, caps=None):
204 super(localpeer, self).__init__()
204 super(localpeer, self).__init__()
205
205
206 if caps is None:
206 if caps is None:
207 caps = moderncaps.copy()
207 caps = moderncaps.copy()
208 self._repo = repo.filtered('served')
208 self._repo = repo.filtered('served')
209 self.ui = repo.ui
209 self.ui = repo.ui
210 self._caps = repo._restrictcapabilities(caps)
210 self._caps = repo._restrictcapabilities(caps)
211
211
212 # Begin of _basepeer interface.
212 # Begin of _basepeer interface.
213
213
214 def url(self):
214 def url(self):
215 return self._repo.url()
215 return self._repo.url()
216
216
217 def local(self):
217 def local(self):
218 return self._repo
218 return self._repo
219
219
220 def peer(self):
220 def peer(self):
221 return self
221 return self
222
222
223 def canpush(self):
223 def canpush(self):
224 return True
224 return True
225
225
226 def close(self):
226 def close(self):
227 self._repo.close()
227 self._repo.close()
228
228
229 # End of _basepeer interface.
229 # End of _basepeer interface.
230
230
231 # Begin of _basewirecommands interface.
231 # Begin of _basewirecommands interface.
232
232
233 def branchmap(self):
233 def branchmap(self):
234 return self._repo.branchmap()
234 return self._repo.branchmap()
235
235
236 def capabilities(self):
236 def capabilities(self):
237 return self._caps
237 return self._caps
238
238
239 def clonebundles(self):
239 def clonebundles(self):
240 return self._repo.tryread('clonebundles.manifest')
240 return self._repo.tryread('clonebundles.manifest')
241
241
242 def debugwireargs(self, one, two, three=None, four=None, five=None):
242 def debugwireargs(self, one, two, three=None, four=None, five=None):
243 """Used to test argument passing over the wire"""
243 """Used to test argument passing over the wire"""
244 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
244 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
245 pycompat.bytestr(four),
245 pycompat.bytestr(four),
246 pycompat.bytestr(five))
246 pycompat.bytestr(five))
247
247
248 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
248 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
249 **kwargs):
249 **kwargs):
250 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
250 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
251 common=common, bundlecaps=bundlecaps,
251 common=common, bundlecaps=bundlecaps,
252 **kwargs)[1]
252 **kwargs)[1]
253 cb = util.chunkbuffer(chunks)
253 cb = util.chunkbuffer(chunks)
254
254
255 if exchange.bundle2requested(bundlecaps):
255 if exchange.bundle2requested(bundlecaps):
256 # When requesting a bundle2, getbundle returns a stream to make the
256 # When requesting a bundle2, getbundle returns a stream to make the
257 # wire level function happier. We need to build a proper object
257 # wire level function happier. We need to build a proper object
258 # from it in local peer.
258 # from it in local peer.
259 return bundle2.getunbundler(self.ui, cb)
259 return bundle2.getunbundler(self.ui, cb)
260 else:
260 else:
261 return changegroup.getunbundler('01', cb, None)
261 return changegroup.getunbundler('01', cb, None)
262
262
263 def heads(self):
263 def heads(self):
264 return self._repo.heads()
264 return self._repo.heads()
265
265
266 def known(self, nodes):
266 def known(self, nodes):
267 return self._repo.known(nodes)
267 return self._repo.known(nodes)
268
268
269 def listkeys(self, namespace):
269 def listkeys(self, namespace):
270 return self._repo.listkeys(namespace)
270 return self._repo.listkeys(namespace)
271
271
272 def lookup(self, key):
272 def lookup(self, key):
273 return self._repo.lookup(key)
273 return self._repo.lookup(key)
274
274
275 def pushkey(self, namespace, key, old, new):
275 def pushkey(self, namespace, key, old, new):
276 return self._repo.pushkey(namespace, key, old, new)
276 return self._repo.pushkey(namespace, key, old, new)
277
277
278 def stream_out(self):
278 def stream_out(self):
279 raise error.Abort(_('cannot perform stream clone against local '
279 raise error.Abort(_('cannot perform stream clone against local '
280 'peer'))
280 'peer'))
281
281
282 def unbundle(self, bundle, heads, url):
282 def unbundle(self, bundle, heads, url):
283 """apply a bundle on a repo
283 """apply a bundle on a repo
284
284
285 This function handles the repo locking itself."""
285 This function handles the repo locking itself."""
286 try:
286 try:
287 try:
287 try:
288 bundle = exchange.readbundle(self.ui, bundle, None)
288 bundle = exchange.readbundle(self.ui, bundle, None)
289 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
289 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
290 if util.safehasattr(ret, 'getchunks'):
290 if util.safehasattr(ret, 'getchunks'):
291 # This is a bundle20 object, turn it into an unbundler.
291 # This is a bundle20 object, turn it into an unbundler.
292 # This little dance should be dropped eventually when the
292 # This little dance should be dropped eventually when the
293 # API is finally improved.
293 # API is finally improved.
294 stream = util.chunkbuffer(ret.getchunks())
294 stream = util.chunkbuffer(ret.getchunks())
295 ret = bundle2.getunbundler(self.ui, stream)
295 ret = bundle2.getunbundler(self.ui, stream)
296 return ret
296 return ret
297 except Exception as exc:
297 except Exception as exc:
298 # If the exception contains output salvaged from a bundle2
298 # If the exception contains output salvaged from a bundle2
299 # reply, we need to make sure it is printed before continuing
299 # reply, we need to make sure it is printed before continuing
300 # to fail. So we build a bundle2 with such output and consume
300 # to fail. So we build a bundle2 with such output and consume
301 # it directly.
301 # it directly.
302 #
302 #
303 # This is not very elegant but allows a "simple" solution for
303 # This is not very elegant but allows a "simple" solution for
304 # issue4594
304 # issue4594
305 output = getattr(exc, '_bundle2salvagedoutput', ())
305 output = getattr(exc, '_bundle2salvagedoutput', ())
306 if output:
306 if output:
307 bundler = bundle2.bundle20(self._repo.ui)
307 bundler = bundle2.bundle20(self._repo.ui)
308 for out in output:
308 for out in output:
309 bundler.addpart(out)
309 bundler.addpart(out)
310 stream = util.chunkbuffer(bundler.getchunks())
310 stream = util.chunkbuffer(bundler.getchunks())
311 b = bundle2.getunbundler(self.ui, stream)
311 b = bundle2.getunbundler(self.ui, stream)
312 bundle2.processbundle(self._repo, b)
312 bundle2.processbundle(self._repo, b)
313 raise
313 raise
314 except error.PushRaced as exc:
314 except error.PushRaced as exc:
315 raise error.ResponseError(_('push failed:'),
315 raise error.ResponseError(_('push failed:'),
316 stringutil.forcebytestr(exc))
316 stringutil.forcebytestr(exc))
317
317
318 # End of _basewirecommands interface.
318 # End of _basewirecommands interface.
319
319
320 # Begin of peer interface.
320 # Begin of peer interface.
321
321
322 def commandexecutor(self):
322 def commandexecutor(self):
323 return localcommandexecutor(self)
323 return localcommandexecutor(self)
324
324
325 # End of peer interface.
325 # End of peer interface.
326
326
327 @zi.implementer(repository.ipeerlegacycommands)
327 @zi.implementer(repository.ipeerlegacycommands)
328 class locallegacypeer(localpeer):
328 class locallegacypeer(localpeer):
329 '''peer extension which implements legacy methods too; used for tests with
329 '''peer extension which implements legacy methods too; used for tests with
330 restricted capabilities'''
330 restricted capabilities'''
331
331
332 def __init__(self, repo):
332 def __init__(self, repo):
333 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
333 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
334
334
335 # Begin of baselegacywirecommands interface.
335 # Begin of baselegacywirecommands interface.
336
336
337 def between(self, pairs):
337 def between(self, pairs):
338 return self._repo.between(pairs)
338 return self._repo.between(pairs)
339
339
340 def branches(self, nodes):
340 def branches(self, nodes):
341 return self._repo.branches(nodes)
341 return self._repo.branches(nodes)
342
342
343 def changegroup(self, nodes, source):
343 def changegroup(self, nodes, source):
344 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
344 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
345 missingheads=self._repo.heads())
345 missingheads=self._repo.heads())
346 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
346 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
347
347
348 def changegroupsubset(self, bases, heads, source):
348 def changegroupsubset(self, bases, heads, source):
349 outgoing = discovery.outgoing(self._repo, missingroots=bases,
349 outgoing = discovery.outgoing(self._repo, missingroots=bases,
350 missingheads=heads)
350 missingheads=heads)
351 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
351 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
352
352
353 # End of baselegacywirecommands interface.
353 # End of baselegacywirecommands interface.
354
354
355 # Increment the sub-version when the revlog v2 format changes to lock out old
355 # Increment the sub-version when the revlog v2 format changes to lock out old
356 # clients.
356 # clients.
357 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
357 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
358
358
359 # Functions receiving (ui, features) that extensions can register to impact
359 # Functions receiving (ui, features) that extensions can register to impact
360 # the ability to load repositories with custom requirements. Only
360 # the ability to load repositories with custom requirements. Only
361 # functions defined in loaded extensions are called.
361 # functions defined in loaded extensions are called.
362 #
362 #
363 # The function receives a set of requirement strings that the repository
363 # The function receives a set of requirement strings that the repository
364 # is capable of opening. Functions will typically add elements to the
364 # is capable of opening. Functions will typically add elements to the
365 # set to reflect that the extension knows how to handle that requirements.
365 # set to reflect that the extension knows how to handle that requirements.
366 featuresetupfuncs = set()
366 featuresetupfuncs = set()
367
367
368 @zi.implementer(repository.completelocalrepository)
368 @zi.implementer(repository.completelocalrepository)
369 class localrepository(object):
369 class localrepository(object):
370
370
371 # obsolete experimental requirements:
371 # obsolete experimental requirements:
372 # - manifestv2: An experimental new manifest format that allowed
372 # - manifestv2: An experimental new manifest format that allowed
373 # for stem compression of long paths. Experiment ended up not
373 # for stem compression of long paths. Experiment ended up not
# being successful (repository sizes went up due to worse delta
# chains), and the code was deleted in 4.6.
# On-disk format requirements: a repository advertising a requirement
# outside this set cannot be read by this version at all.
supportedformats = {
    'revlogv1',
    'generaldelta',
    'treemanifest',
    REVLOGV2_REQUIREMENT,
}
# Everything this client understands: the storage formats above plus
# working-copy / sharing features that do not affect revlog encoding.
_basesupported = supportedformats | {
    'store',
    'fncache',
    'shared',
    'relshared',
    'dotencode',
    'exp-sparse',
}
# Requirements propagated into svfs.options (see _applyopenerreqs) and
# therefore visible to the revlog layer when opening store files.
openerreqs = {
    'revlogv1',
    'generaldelta',
    'treemanifest',
}

# list of prefix for file which can be written without 'wlock'
# Extensions should extend this list when needed
_wlockfreeprefix = {
    # We might consider requiring 'wlock' for the next
    # two, but pretty much all the existing code assume
    # wlock is not needed so we keep them excluded for
    # now.
    'hgrc',
    'requires',
    # XXX cache is a complicated business someone
    # should investigate this in depth at some point
    'cache/',
    # XXX shouldn't be dirstate covered by the wlock?
    'dirstate',
    # XXX bisect was still a bit too messy at the time
    # this changeset was introduced. Someone should fix
    # the remaining bit and drop this line
    'bisect.state',
}
415
415
def __init__(self, baseui, path, create=False, intents=None):
    """Open (or, when ``create`` is True, initialize) the repo at ``path``.

    baseui: the ui whose configuration the repo ui is layered on top of.
    path: filesystem path of the working directory root.
    create: when True, a new repository is created on disk.
    intents: hints about intended usage (unused here; accepted for API
        compatibility — TODO confirm consumers).

    Raises error.RepoError when the repository is missing, already
    exists (with create=True), or is misconfigured (broken sharedpath,
    sparse requirement without the sparse extension).
    """
    self.requirements = set()
    self.filtername = None
    # wvfs: rooted at the repository root, used to access the working copy
    self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    # vfs: rooted at .hg, used to access repo files outside of .hg/store
    self.vfs = None
    # svfs: usually rooted at .hg/store, used to access repository history
    # If this is a shared repository, this vfs may point to another
    # repository's .hg/store directory.
    self.svfs = None
    self.root = self.wvfs.base
    self.path = self.wvfs.join(".hg")
    self.origroot = path
    # This is only used by context.workingctx.match in order to
    # detect files in subrepos.
    self.auditor = pathutil.pathauditor(
        self.root, callback=self._checknested)
    # This is only used by context.basectx.match in order to detect
    # files in subrepos.
    self.nofsauditor = pathutil.pathauditor(
        self.root, callback=self._checknested, realfs=False, cached=True)
    self.baseui = baseui
    self.ui = baseui.copy()
    self.ui.copy = baseui.copy # prevent copying repo configuration
    self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
    if (self.ui.configbool('devel', 'all-warnings') or
        self.ui.configbool('devel', 'check-locks')):
        self.vfs.audit = self._getvfsward(self.vfs.audit)
    # A list of callback to shape the phase if no data were found.
    # Callback are in the form: func(repo, roots) --> processed root.
    # This list it to be filled by extension during repo setup
    self._phasedefaults = []
    try:
        self.ui.readconfig(self.vfs.join("hgrc"), self.root)
        self._loadextensions()
    except IOError:
        # a missing or unreadable .hg/hgrc is not fatal
        pass

    if featuresetupfuncs:
        self.supported = set(self._basesupported) # use private copy
        extmods = set(m.__name__ for n, m
                      in extensions.extensions(self.ui))
        for setupfunc in featuresetupfuncs:
            if setupfunc.__module__ in extmods:
                setupfunc(self.ui, self.supported)
    else:
        self.supported = self._basesupported
    color.setup(self.ui)

    # Add compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            self.supported.add('exp-compression-%s' % name)

    if not self.vfs.isdir():
        if create:
            self.requirements = newreporequirements(self)

            if not self.wvfs.exists():
                self.wvfs.makedirs()
            self.vfs.makedir(notindexed=True)

            if 'store' in self.requirements:
                self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
        else:
            raise error.RepoError(_("repository %s not found") % path)
    elif create:
        raise error.RepoError(_("repository %s already exists") % path)
    else:
        try:
            self.requirements = scmutil.readrequires(
                self.vfs, self.supported)
        except IOError as inst:
            # a repo without a requires file is a valid (old) repo
            if inst.errno != errno.ENOENT:
                raise

    cachepath = self.vfs.join('cache')
    self.sharedpath = self.path
    try:
        sharedpath = self.vfs.read("sharedpath").rstrip('\n')
        if 'relshared' in self.requirements:
            # sharedpath is stored relative to this repo's .hg
            sharedpath = self.vfs.join(sharedpath)
        vfs = vfsmod.vfs(sharedpath, realpath=True)
        cachepath = vfs.join('cache')
        s = vfs.base
        if not vfs.exists():
            raise error.RepoError(
                _('.hg/sharedpath points to nonexistent directory %s') % s)
        self.sharedpath = s
    except IOError as inst:
        # no sharedpath file means this repository is not shared
        if inst.errno != errno.ENOENT:
            raise

    if 'exp-sparse' in self.requirements and not sparse.enabled:
        raise error.RepoError(_('repository is using sparse feature but '
                                'sparse is not enabled; enable the '
                                '"sparse" extensions to access'))

    self.store = store.store(
        self.requirements, self.sharedpath,
        lambda base: vfsmod.vfs(base, cacheaudited=True))
    self.spath = self.store.path
    self.svfs = self.store.vfs
    self.sjoin = self.store.join
    self.vfs.createmode = self.store.createmode
    self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    self.cachevfs.createmode = self.store.createmode
    if (self.ui.configbool('devel', 'all-warnings') or
        self.ui.configbool('devel', 'check-locks')):
        if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
            self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
        else: # standard vfs
            self.svfs.audit = self._getsvfsward(self.svfs.audit)
    self._applyopenerreqs()
    if create:
        self._writerequirements()

    self._dirstatevalidatewarned = False

    self._branchcaches = {}
    self._revbranchcache = None
    self._filterpats = {}
    self._datafilters = {}
    self._transref = self._lockref = self._wlockref = None

    # A cache for various files under .hg/ that tracks file changes,
    # (used by the filecache decorator)
    #
    # Maps a property name to its util.filecacheentry
    self._filecache = {}

    # hold sets of revision to be filtered
    # should be cleared when something might have changed the filter value:
    # - new changesets,
    # - phase change,
    # - new obsolescence marker,
    # - working directory parent change,
    # - bookmark changes
    self.filteredrevcache = {}

    # post-dirstate-status hooks
    self._postdsstatus = []

    # generic mapping between names and nodes
    self.names = namespaces.namespaces()

    # Key to signature value.
    self._sparsesignaturecache = {}
    # Signature to cached matcher instance.
    self._sparsematchercache = {}
575
575
def _getvfsward(self, origfunc):
    """build a ward for self.vfs

    Wraps the vfs audit hook so that any write through self.vfs emits
    a devel warning when the appropriate lock (lock for journal files,
    wlock for everything else) is not held.
    """
    rref = weakref.ref(self)
    def checkvfs(path, mode=None):
        ret = origfunc(path, mode=mode)
        repo = rref()
        # bail out if the repo has been collected or is only partially
        # constructed (lock attributes not set up yet)
        if (repo is None
            or not util.safehasattr(repo, '_wlockref')
            or not util.safehasattr(repo, '_lockref')):
            return
        # read accesses never require a lock
        if mode in (None, 'r', 'rb'):
            return
        if path.startswith(repo.path):
            # truncate name relative to the repository (.hg)
            path = path[len(repo.path) + 1:]
        if path.startswith('cache/'):
            msg = 'accessing cache with vfs instead of cachevfs: "%s"'
            repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
        if path.startswith('journal.'):
            # journal is covered by 'lock'
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
        elif repo._currentlock(repo._wlockref) is None:
            # rest of vfs files are covered by 'wlock'
            #
            # exclude special files
            for prefix in self._wlockfreeprefix:
                if path.startswith(prefix):
                    return
            repo.ui.develwarn('write with no wlock: "%s"' % path,
                              stacklevel=2, config='check-locks')
        return ret
    return checkvfs
610
610
def _getsvfsward(self, origfunc):
    """build a ward for self.svfs

    Wraps the store vfs audit hook so that writes performed without
    holding 'lock' trigger a devel warning.
    """
    reporef = weakref.ref(self)
    def checksvfs(path, mode=None):
        result = origfunc(path, mode=mode)
        repo = reporef()
        # skip when the repo is gone or not fully constructed yet
        if repo is None or not util.safehasattr(repo, '_lockref'):
            return
        # reads never need the lock
        if mode in (None, 'r', 'rb'):
            return
        if path.startswith(repo.sharedpath):
            # truncate name relative to the repository (.hg)
            path = path[len(repo.sharedpath) + 1:]
        if repo._currentlock(repo._lockref) is None:
            repo.ui.develwarn('write with no lock: "%s"' % path,
                              stacklevel=3)
        return result
    return checksvfs
629
629
def close(self):
    """Flush cached state to disk before the repository goes away."""
    self._writecaches()
632
632
def _loadextensions(self):
    """Load every extension enabled in this repository's configuration."""
    extensions.loadall(self.ui)
635
635
def _writecaches(self):
    """Persist in-memory caches (currently the rev-branch cache)."""
    rbc = self._revbranchcache
    if rbc:
        rbc.write()
639
639
def _restrictcapabilities(self, caps):
    """Filter/extend the capabilities advertised to peers.

    When bundle2 advertising is enabled, the encoded bundle2
    capability blob is appended to the set.
    """
    if not self.ui.configbool('experimental', 'bundle2-advertise'):
        return caps
    caps = set(caps)
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                      role='client'))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps
647
647
def _applyopenerreqs(self):
    """Populate self.svfs.options from requirements and config.

    The options dict is consumed by the revlog layer when store files
    are opened; it carries both format requirements and tuning knobs.
    """
    self.svfs.options = dict((r, 1) for r in self.requirements
                             if r in self.openerreqs)
    # experimental config: format.chunkcachesize
    chunkcachesize = self.ui.configint('format', 'chunkcachesize')
    if chunkcachesize is not None:
        self.svfs.options['chunkcachesize'] = chunkcachesize
    # experimental config: format.maxchainlen
    maxchainlen = self.ui.configint('format', 'maxchainlen')
    if maxchainlen is not None:
        self.svfs.options['maxchainlen'] = maxchainlen
    # experimental config: format.manifestcachesize
    manifestcachesize = self.ui.configint('format', 'manifestcachesize')
    if manifestcachesize is not None:
        self.svfs.options['manifestcachesize'] = manifestcachesize
    # experimental config: format.aggressivemergedeltas
    aggressivemergedeltas = self.ui.configbool('format',
                                               'aggressivemergedeltas')
    self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
    self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
    chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
    if 0 <= chainspan:
        self.svfs.options['maxdeltachainspan'] = chainspan
    mmapindexthreshold = self.ui.configbytes('experimental',
                                             'mmapindexthreshold')
    if mmapindexthreshold is not None:
        self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
    withsparseread = self.ui.configbool('experimental', 'sparse-read')
    srdensitythres = float(self.ui.config('experimental',
                                          'sparse-read.density-threshold'))
    srmingapsize = self.ui.configbytes('experimental',
                                       'sparse-read.min-gap-size')
    self.svfs.options['with-sparse-read'] = withsparseread
    self.svfs.options['sparse-read-density-threshold'] = srdensitythres
    self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

    # a requirement of the form 'exp-compression-<engine>' selects the
    # compression engine used when writing revlog data
    for r in self.requirements:
        if r.startswith('exp-compression-'):
            self.svfs.options['compengine'] = r[len('exp-compression-'):]

    # TODO move "revlogv2" to openerreqs once finalized.
    if REVLOGV2_REQUIREMENT in self.requirements:
        self.svfs.options['revlogv2'] = True
691
691
def _writerequirements(self):
    """Serialize self.requirements into the .hg/requires file."""
    scmutil.writerequires(self.vfs, self.requirements)
694
694
def _checknested(self, path):
    """Determine if path is a legal nested repository.

    Returns True when ``path`` (an absolute path under self.root) is a
    subrepository recorded in the working directory's .hgsub state, or
    is legally nested inside one; False otherwise.
    """
    if not path.startswith(self.root):
        return False
    subpath = path[len(self.root) + 1:]
    normsubpath = util.pconvert(subpath)

    # XXX: Checking against the current working copy is wrong in
    # the sense that it can reject things like
    #
    #   $ hg cat -r 10 sub/x.txt
    #
    # if sub/ is no longer a subrepository in the working copy
    # parent revision.
    #
    # However, it can of course also allow things that would have
    # been rejected before, such as the above cat command if sub/
    # is a subrepository now, but was a normal directory before.
    # The old path auditor would have rejected by mistake since it
    # panics when it sees sub/.hg/.
    #
    # All in all, checking against the working copy seems sensible
    # since we want to prevent access to nested repositories on
    # the filesystem *now*.
    ctx = self[None]
    parts = util.splitpath(subpath)
    while parts:
        prefix = '/'.join(parts)
        if prefix in ctx.substate:
            if prefix == normsubpath:
                # path is itself a registered subrepo: legal
                return True
            else:
                # path lies inside a subrepo: delegate the check to it
                sub = ctx.sub(prefix)
                return sub.checknested(subpath[len(prefix) + 1:])
        else:
            # drop the last component and retry with the parent dir
            parts.pop()
    return False
732
732
def peer(self):
    """Return a local peer wrapping this repository.

    A new instance is built on every call; caching it would create a
    reference cycle between repo and peer.
    """
    return localpeer(self)
735
735
def unfiltered(self):
    """Return unfiltered version of the repository

    The base repository is already unfiltered, so this simply returns
    self; repoview subclasses override it to unwrap themselves.
    """
    return self
741
741
def filtered(self, name, visibilityexceptions=None):
    """Return a filtered version of a repository"""
    viewcls = repoview.newtype(self.unfiltered().__class__)
    return viewcls(self, name, visibilityexceptions)
746
746
@repofilecache('bookmarks', 'bookmarks.current')
def _bookmarks(self):
    """The bookmark store, reloaded when the bookmark files change."""
    return bookmarks.bmstore(self)
750
750
@property
def _activebookmark(self):
    """Name of the currently active bookmark, if any."""
    return self._bookmarks.active
754
754
# _phasesets depend on changelog. what we need is to call
# _phasecache.invalidate() if '00changelog.i' was changed, but it
# can't be easily expressed in filecache mechanism.
@storecache('phaseroots', '00changelog.i')
def _phasecache(self):
    """Phase information for this repository's changesets."""
    return phases.phasecache(self, self._phasedefaults)
761
761
@storecache('obsstore')
def obsstore(self):
    """The obsolescence-marker store for this repository."""
    return obsolete.makestore(self.ui, self)
765
765
@storecache('00changelog.i')
def changelog(self):
    """The changelog, honoring pending transaction data when present."""
    trypending = txnutil.mayhavepending(self.root)
    return changelog.changelog(self.svfs, trypending=trypending)
770
770
def _constructmanifest(self):
    # Temporary hook while migrating from manifest to manifestlog: it
    # lets bundlerepo and unionrepo substitute their own manifest
    # revlog implementation.
    return manifest.manifestrevlog(self.svfs)
776
776
@storecache('00manifest.i')
def manifestlog(self):
    """The manifest log for this repository."""
    return manifest.manifestlog(self.svfs, self)
780
780
@repofilecache('dirstate')
def dirstate(self):
    """The working directory state, wired up with sparse matching."""
    def sparsematchfn():
        return sparse.matcher(self)

    return dirstate.dirstate(self.vfs, self.ui, self.root,
                             self._dirstatevalidate, sparsematchfn)
787
787
def _dirstatevalidate(self, node):
    """Return ``node`` if the changelog knows it, else nullid.

    A one-time warning is emitted when the dirstate points at an
    unknown working directory parent.
    """
    try:
        self.changelog.rev(node)
    except error.LookupError:
        if not self._dirstatevalidatewarned:
            self._dirstatevalidatewarned = True
            self.ui.warn(_("warning: ignoring unknown"
                           " working parent %s!\n") % short(node))
        return nullid
    return node
798
798
@repofilecache(narrowspec.FILENAME)
def narrowpats(self):
    """matcher patterns for this repository's narrowspec

    A tuple of (includes, excludes).
    """
    if self.shared():
        # a shared repo keeps its narrowspec in the source repository
        from . import hg
        return narrowspec.load(hg.sharedreposource(self))
    return narrowspec.load(self)
810
810
@repofilecache(narrowspec.FILENAME)
def _narrowmatch(self):
    """Matcher enforcing the narrowspec (always() for non-narrow repos)."""
    if changegroup.NARROW_REQUIREMENT not in self.requirements:
        return matchmod.always(self.root, '')
    includes, excludes = self.narrowpats
    return narrowspec.match(self.root, include=includes, exclude=excludes)
817
817
# TODO(martinvonz): make this property-like instead?
def narrowmatch(self):
    """Return the cached narrowspec matcher."""
    return self._narrowmatch
821
821
def setnarrowpats(self, newincludes, newexcludes):
    """Persist new narrowspec patterns and invalidate cached state."""
    if self.shared():
        # a shared repo stores its narrowspec in the source repository
        from . import hg
        target = hg.sharedreposource(self)
    else:
        target = self
    narrowspec.save(target, newincludes, newexcludes)
    self.invalidate(clearfilecache=True)
829
829
def __getitem__(self, changeid):
    """Return the context for ``changeid``.

    ``changeid`` may be None (working directory), an existing context
    (returned unchanged), a slice of revision numbers, or anything
    accepted by context.changectx (revision number, node, etc.).
    """
    if changeid is None:
        return context.workingctx(self)
    if isinstance(changeid, context.basectx):
        return changeid
    if isinstance(changeid, slice):
        # wdirrev isn't contiguous so the slice shouldn't include it
        return [context.changectx(self, i)
                for i in xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs]
    try:
        return context.changectx(self, changeid)
    except error.WdirUnsupported:
        return context.workingctx(self)
844
844
def __contains__(self, changeid):
    """True if the given changeid exists

    error.LookupError is raised if an ambiguous node specified.
    """
    try:
        self[changeid]
        return True
    # FilteredIndexError/FilteredLookupError are translated into
    # RepoLookupError by repo[changeid], so catching RepoLookupError
    # alone is sufficient here.
    except error.RepoLookupError:
        return False
856
855
def __nonzero__(self):
    """A repository object is always truthy, even when empty."""
    return True

__bool__ = __nonzero__
861
860
def __len__(self):
    # go through the unfiltered repo: computing repoview.changelog just
    # to count revisions would be needlessly expensive here
    return len(self.unfiltered().changelog)
866
865
def __iter__(self):
    """Iterate over the revision numbers in the changelog."""
    return iter(self.changelog)
869
868
def revs(self, expr, *args):
    '''Find revisions matching a revset.

    ``expr`` is a revset string which may use %-formatting to escape
    certain types (see ``revsetlang.formatspec``); ``args`` supplies
    the values for those escapes.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()`` or
    ``repo.anyrevs([expr], user=True)``.

    Returns a revset.abstractsmartset, which is a list-like interface
    that contains integer revisions.
    '''
    fullspec = revsetlang.formatspec(expr, *args)
    matcher = revset.match(None, fullspec)
    return matcher(self)
886
885
887 def set(self, expr, *args):
886 def set(self, expr, *args):
888 '''Find revisions matching a revset and emit changectx instances.
887 '''Find revisions matching a revset and emit changectx instances.
889
888
890 This is a convenience wrapper around ``revs()`` that iterates the
889 This is a convenience wrapper around ``revs()`` that iterates the
891 result and is a generator of changectx instances.
890 result and is a generator of changectx instances.
892
891
893 Revset aliases from the configuration are not expanded. To expand
892 Revset aliases from the configuration are not expanded. To expand
894 user aliases, consider calling ``scmutil.revrange()``.
893 user aliases, consider calling ``scmutil.revrange()``.
895 '''
894 '''
896 for r in self.revs(expr, *args):
895 for r in self.revs(expr, *args):
897 yield self[r]
896 yield self[r]
898
897
899 def anyrevs(self, specs, user=False, localalias=None):
898 def anyrevs(self, specs, user=False, localalias=None):
900 '''Find revisions matching one of the given revsets.
899 '''Find revisions matching one of the given revsets.
901
900
902 Revset aliases from the configuration are not expanded by default. To
901 Revset aliases from the configuration are not expanded by default. To
903 expand user aliases, specify ``user=True``. To provide some local
902 expand user aliases, specify ``user=True``. To provide some local
904 definitions overriding user aliases, set ``localalias`` to
903 definitions overriding user aliases, set ``localalias`` to
905 ``{name: definitionstring}``.
904 ``{name: definitionstring}``.
906 '''
905 '''
907 if user:
906 if user:
908 m = revset.matchany(self.ui, specs,
907 m = revset.matchany(self.ui, specs,
909 lookup=revset.lookupfn(self),
908 lookup=revset.lookupfn(self),
910 localalias=localalias)
909 localalias=localalias)
911 else:
910 else:
912 m = revset.matchany(None, specs, localalias=localalias)
911 m = revset.matchany(None, specs, localalias=localalias)
913 return m(self)
912 return m(self)
914
913
    def url(self):
        """Return the URL of this repository (local repos use file:)."""
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
926
925
927 @filteredpropertycache
926 @filteredpropertycache
928 def _tagscache(self):
927 def _tagscache(self):
929 '''Returns a tagscache object that contains various tags related
928 '''Returns a tagscache object that contains various tags related
930 caches.'''
929 caches.'''
931
930
932 # This simplifies its cache management by having one decorated
931 # This simplifies its cache management by having one decorated
933 # function (this one) and the rest simply fetch things from it.
932 # function (this one) and the rest simply fetch things from it.
934 class tagscache(object):
933 class tagscache(object):
935 def __init__(self):
934 def __init__(self):
936 # These two define the set of tags for this repository. tags
935 # These two define the set of tags for this repository. tags
937 # maps tag name to node; tagtypes maps tag name to 'global' or
936 # maps tag name to node; tagtypes maps tag name to 'global' or
938 # 'local'. (Global tags are defined by .hgtags across all
937 # 'local'. (Global tags are defined by .hgtags across all
939 # heads, and local tags are defined in .hg/localtags.)
938 # heads, and local tags are defined in .hg/localtags.)
940 # They constitute the in-memory cache of tags.
939 # They constitute the in-memory cache of tags.
941 self.tags = self.tagtypes = None
940 self.tags = self.tagtypes = None
942
941
943 self.nodetagscache = self.tagslist = None
942 self.nodetagscache = self.tagslist = None
944
943
945 cache = tagscache()
944 cache = tagscache()
946 cache.tags, cache.tagtypes = self._findtags()
945 cache.tags, cache.tagtypes = self._findtags()
947
946
948 return cache
947 return cache
949
948
950 def tags(self):
949 def tags(self):
951 '''return a mapping of tag to node'''
950 '''return a mapping of tag to node'''
952 t = {}
951 t = {}
953 if self.changelog.filteredrevs:
952 if self.changelog.filteredrevs:
954 tags, tt = self._findtags()
953 tags, tt = self._findtags()
955 else:
954 else:
956 tags = self._tagscache.tags
955 tags = self._tagscache.tags
957 for k, v in tags.iteritems():
956 for k, v in tags.iteritems():
958 try:
957 try:
959 # ignore tags to unknown nodes
958 # ignore tags to unknown nodes
960 self.changelog.rev(v)
959 self.changelog.rev(v)
961 t[k] = v
960 t[k] = v
962 except (error.LookupError, ValueError):
961 except (error.LookupError, ValueError):
963 pass
962 pass
964 return t
963 return t
965
964
966 def _findtags(self):
965 def _findtags(self):
967 '''Do the hard work of finding tags. Return a pair of dicts
966 '''Do the hard work of finding tags. Return a pair of dicts
968 (tags, tagtypes) where tags maps tag name to node, and tagtypes
967 (tags, tagtypes) where tags maps tag name to node, and tagtypes
969 maps tag name to a string like \'global\' or \'local\'.
968 maps tag name to a string like \'global\' or \'local\'.
970 Subclasses or extensions are free to add their own tags, but
969 Subclasses or extensions are free to add their own tags, but
971 should be aware that the returned dicts will be retained for the
970 should be aware that the returned dicts will be retained for the
972 duration of the localrepo object.'''
971 duration of the localrepo object.'''
973
972
974 # XXX what tagtype should subclasses/extensions use? Currently
973 # XXX what tagtype should subclasses/extensions use? Currently
975 # mq and bookmarks add tags, but do not set the tagtype at all.
974 # mq and bookmarks add tags, but do not set the tagtype at all.
976 # Should each extension invent its own tag type? Should there
975 # Should each extension invent its own tag type? Should there
977 # be one tagtype for all such "virtual" tags? Or is the status
976 # be one tagtype for all such "virtual" tags? Or is the status
978 # quo fine?
977 # quo fine?
979
978
980
979
981 # map tag name to (node, hist)
980 # map tag name to (node, hist)
982 alltags = tagsmod.findglobaltags(self.ui, self)
981 alltags = tagsmod.findglobaltags(self.ui, self)
983 # map tag name to tag type
982 # map tag name to tag type
984 tagtypes = dict((tag, 'global') for tag in alltags)
983 tagtypes = dict((tag, 'global') for tag in alltags)
985
984
986 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
985 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
987
986
988 # Build the return dicts. Have to re-encode tag names because
987 # Build the return dicts. Have to re-encode tag names because
989 # the tags module always uses UTF-8 (in order not to lose info
988 # the tags module always uses UTF-8 (in order not to lose info
990 # writing to the cache), but the rest of Mercurial wants them in
989 # writing to the cache), but the rest of Mercurial wants them in
991 # local encoding.
990 # local encoding.
992 tags = {}
991 tags = {}
993 for (name, (node, hist)) in alltags.iteritems():
992 for (name, (node, hist)) in alltags.iteritems():
994 if node != nullid:
993 if node != nullid:
995 tags[encoding.tolocal(name)] = node
994 tags[encoding.tolocal(name)] = node
996 tags['tip'] = self.changelog.tip()
995 tags['tip'] = self.changelog.tip()
997 tagtypes = dict([(encoding.tolocal(name), value)
996 tagtypes = dict([(encoding.tolocal(name), value)
998 for (name, value) in tagtypes.iteritems()])
997 for (name, value) in tagtypes.iteritems()])
999 return (tags, tagtypes)
998 return (tags, tagtypes)
1000
999
1001 def tagtype(self, tagname):
1000 def tagtype(self, tagname):
1002 '''
1001 '''
1003 return the type of the given tag. result can be:
1002 return the type of the given tag. result can be:
1004
1003
1005 'local' : a local tag
1004 'local' : a local tag
1006 'global' : a global tag
1005 'global' : a global tag
1007 None : tag does not exist
1006 None : tag does not exist
1008 '''
1007 '''
1009
1008
1010 return self._tagscache.tagtypes.get(tagname)
1009 return self._tagscache.tagtypes.get(tagname)
1011
1010
1012 def tagslist(self):
1011 def tagslist(self):
1013 '''return a list of tags ordered by revision'''
1012 '''return a list of tags ordered by revision'''
1014 if not self._tagscache.tagslist:
1013 if not self._tagscache.tagslist:
1015 l = []
1014 l = []
1016 for t, n in self.tags().iteritems():
1015 for t, n in self.tags().iteritems():
1017 l.append((self.changelog.rev(n), t, n))
1016 l.append((self.changelog.rev(n), t, n))
1018 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1017 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1019
1018
1020 return self._tagscache.tagslist
1019 return self._tagscache.tagslist
1021
1020
1022 def nodetags(self, node):
1021 def nodetags(self, node):
1023 '''return the tags associated with a node'''
1022 '''return the tags associated with a node'''
1024 if not self._tagscache.nodetagscache:
1023 if not self._tagscache.nodetagscache:
1025 nodetagscache = {}
1024 nodetagscache = {}
1026 for t, n in self._tagscache.tags.iteritems():
1025 for t, n in self._tagscache.tags.iteritems():
1027 nodetagscache.setdefault(n, []).append(t)
1026 nodetagscache.setdefault(n, []).append(t)
1028 for tags in nodetagscache.itervalues():
1027 for tags in nodetagscache.itervalues():
1029 tags.sort()
1028 tags.sort()
1030 self._tagscache.nodetagscache = nodetagscache
1029 self._tagscache.nodetagscache = nodetagscache
1031 return self._tagscache.nodetagscache.get(node, [])
1030 return self._tagscache.nodetagscache.get(node, [])
1032
1031
1033 def nodebookmarks(self, node):
1032 def nodebookmarks(self, node):
1034 """return the list of bookmarks pointing to the specified node"""
1033 """return the list of bookmarks pointing to the specified node"""
1035 marks = []
1034 marks = []
1036 for bookmark, n in self._bookmarks.iteritems():
1035 for bookmark, n in self._bookmarks.iteritems():
1037 if n == node:
1036 if n == node:
1038 marks.append(bookmark)
1037 marks.append(bookmark)
1039 return sorted(marks)
1038 return sorted(marks)
1040
1039
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter cache before handing it out
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
1046
1045
    @unfilteredmethod
    def revbranchcache(self):
        """Return the revision->branch cache, creating it lazily on first
        use.  The cache is shared across filtered views, hence the
        unfiltered repo is used to build it."""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1052
1051
1053 def branchtip(self, branch, ignoremissing=False):
1052 def branchtip(self, branch, ignoremissing=False):
1054 '''return the tip node for a given branch
1053 '''return the tip node for a given branch
1055
1054
1056 If ignoremissing is True, then this method will not raise an error.
1055 If ignoremissing is True, then this method will not raise an error.
1057 This is helpful for callers that only expect None for a missing branch
1056 This is helpful for callers that only expect None for a missing branch
1058 (e.g. namespace).
1057 (e.g. namespace).
1059
1058
1060 '''
1059 '''
1061 try:
1060 try:
1062 return self.branchmap().branchtip(branch)
1061 return self.branchmap().branchtip(branch)
1063 except KeyError:
1062 except KeyError:
1064 if not ignoremissing:
1063 if not ignoremissing:
1065 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1064 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1066 else:
1065 else:
1067 pass
1066 pass
1068
1067
1069 def lookup(self, key):
1068 def lookup(self, key):
1070 return scmutil.revsymbol(self, key).node()
1069 return scmutil.revsymbol(self, key).node()
1071
1070
1072 def lookupbranch(self, key):
1071 def lookupbranch(self, key):
1073 if key in self.branchmap():
1072 if key in self.branchmap():
1074 return key
1073 return key
1075
1074
1076 return scmutil.revsymbol(self, key).branch()
1075 return scmutil.revsymbol(self, key).branch()
1077
1076
1078 def known(self, nodes):
1077 def known(self, nodes):
1079 cl = self.changelog
1078 cl = self.changelog
1080 nm = cl.nodemap
1079 nm = cl.nodemap
1081 filtered = cl.filteredrevs
1080 filtered = cl.filteredrevs
1082 result = []
1081 result = []
1083 for n in nodes:
1082 for n in nodes:
1084 r = nm.get(n)
1083 r = nm.get(n)
1085 resp = not (r is None or r in filtered)
1084 resp = not (r is None or r in filtered)
1086 result.append(resp)
1085 result.append(resp)
1087 return result
1086 return result
1088
1087
    def local(self):
        """Return self, marking this as a local repository (peer classes
        return a false value instead)."""
        return self

    def publishing(self):
        """True if this repository publishes changesets on push."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)
1096
1095
1097 def cancopy(self):
1096 def cancopy(self):
1098 # so statichttprepo's override of local() works
1097 # so statichttprepo's override of local() works
1099 if not self.local():
1098 if not self.local():
1100 return False
1099 return False
1101 if not self.publishing():
1100 if not self.publishing():
1102 return True
1101 return True
1103 # if publishing we can't copy if there is filtered content
1102 # if publishing we can't copy if there is filtered content
1104 return not self.filtered('visible').changelog.filteredrevs
1103 return not self.filtered('visible').changelog.filteredrevs
1105
1104
1106 def shared(self):
1105 def shared(self):
1107 '''the type of shared repository (None if not shared)'''
1106 '''the type of shared repository (None if not shared)'''
1108 if self.sharedpath != self.path:
1107 if self.sharedpath != self.path:
1109 return 'store'
1108 return 'store'
1110 return None
1109 return None
1111
1110
1112 def wjoin(self, f, *insidef):
1111 def wjoin(self, f, *insidef):
1113 return self.vfs.reljoin(self.root, f, *insidef)
1112 return self.vfs.reljoin(self.root, f, *insidef)
1114
1113
1115 def file(self, f):
1114 def file(self, f):
1116 if f[0] == '/':
1115 if f[0] == '/':
1117 f = f[1:]
1116 f = f[1:]
1118 return filelog.filelog(self.svfs, f)
1117 return filelog.filelog(self.svfs, f)
1119
1118
1120 def setparents(self, p1, p2=nullid):
1119 def setparents(self, p1, p2=nullid):
1121 with self.dirstate.parentchange():
1120 with self.dirstate.parentchange():
1122 copies = self.dirstate.setparents(p1, p2)
1121 copies = self.dirstate.setparents(p1, p2)
1123 pctx = self[p1]
1122 pctx = self[p1]
1124 if copies:
1123 if copies:
1125 # Adjust copy records, the dirstate cannot do it, it
1124 # Adjust copy records, the dirstate cannot do it, it
1126 # requires access to parents manifests. Preserve them
1125 # requires access to parents manifests. Preserve them
1127 # only for entries added to first parent.
1126 # only for entries added to first parent.
1128 for f in copies:
1127 for f in copies:
1129 if f not in pctx and copies[f] in pctx:
1128 if f not in pctx and copies[f] in pctx:
1130 self.dirstate.copy(copies[f], f)
1129 self.dirstate.copy(copies[f], f)
1131 if p2 == nullid:
1130 if p2 == nullid:
1132 for f, s in sorted(self.dirstate.copies().items()):
1131 for f, s in sorted(self.dirstate.copies().items()):
1133 if f not in pctx and s not in pctx:
1132 if f not in pctx and s not in pctx:
1134 self.dirstate.copy(None, f)
1133 self.dirstate.copy(None, f)
1135
1134
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)
1141
1140
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        """Return the path to repo file ``f`` relative to ``cwd``."""
        return self.dirstate.pathto(f, cwd)
1147
1146
1148 def _loadfilter(self, filter):
1147 def _loadfilter(self, filter):
1149 if filter not in self._filterpats:
1148 if filter not in self._filterpats:
1150 l = []
1149 l = []
1151 for pat, cmd in self.ui.configitems(filter):
1150 for pat, cmd in self.ui.configitems(filter):
1152 if cmd == '!':
1151 if cmd == '!':
1153 continue
1152 continue
1154 mf = matchmod.match(self.root, '', [pat])
1153 mf = matchmod.match(self.root, '', [pat])
1155 fn = None
1154 fn = None
1156 params = cmd
1155 params = cmd
1157 for name, filterfn in self._datafilters.iteritems():
1156 for name, filterfn in self._datafilters.iteritems():
1158 if cmd.startswith(name):
1157 if cmd.startswith(name):
1159 fn = filterfn
1158 fn = filterfn
1160 params = cmd[len(name):].lstrip()
1159 params = cmd[len(name):].lstrip()
1161 break
1160 break
1162 if not fn:
1161 if not fn:
1163 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1162 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1164 # Wrap old filters not supporting keyword arguments
1163 # Wrap old filters not supporting keyword arguments
1165 if not pycompat.getargspec(fn)[2]:
1164 if not pycompat.getargspec(fn)[2]:
1166 oldfn = fn
1165 oldfn = fn
1167 fn = lambda s, c, **kwargs: oldfn(s, c)
1166 fn = lambda s, c, **kwargs: oldfn(s, c)
1168 l.append((mf, fn, params))
1167 l.append((mf, fn, params))
1169 self._filterpats[filter] = l
1168 self._filterpats[filter] = l
1170 return self._filterpats[filter]
1169 return self._filterpats[filter]
1171
1170
1172 def _filter(self, filterpats, filename, data):
1171 def _filter(self, filterpats, filename, data):
1173 for mf, fn, cmd in filterpats:
1172 for mf, fn, cmd in filterpats:
1174 if mf(filename):
1173 if mf(filename):
1175 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1174 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1176 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1175 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1177 break
1176 break
1178
1177
1179 return data
1178 return data
1180
1179
    @unfilteredpropertycache
    def _encodefilterpats(self):
        """Cached filter patterns from the [encode] config section."""
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        """Cached filter patterns from the [decode] config section."""
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        """Register ``filter`` as a named data filter usable from the
        [encode]/[decode] configuration."""
        self._datafilters[name] = filter
1191
1190
1192 def wread(self, filename):
1191 def wread(self, filename):
1193 if self.wvfs.islink(filename):
1192 if self.wvfs.islink(filename):
1194 data = self.wvfs.readlink(filename)
1193 data = self.wvfs.readlink(filename)
1195 else:
1194 else:
1196 data = self.wvfs.read(filename)
1195 data = self.wvfs.read(filename)
1197 return self._filter(self._encodefilterpats, filename, data)
1196 return self._filter(self._encodefilterpats, filename, data)
1198
1197
1199 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1198 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1200 """write ``data`` into ``filename`` in the working directory
1199 """write ``data`` into ``filename`` in the working directory
1201
1200
1202 This returns length of written (maybe decoded) data.
1201 This returns length of written (maybe decoded) data.
1203 """
1202 """
1204 data = self._filter(self._decodefilterpats, filename, data)
1203 data = self._filter(self._decodefilterpats, filename, data)
1205 if 'l' in flags:
1204 if 'l' in flags:
1206 self.wvfs.symlink(data, filename)
1205 self.wvfs.symlink(data, filename)
1207 else:
1206 else:
1208 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1207 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1209 **kwargs)
1208 **kwargs)
1210 if 'x' in flags:
1209 if 'x' in flags:
1211 self.wvfs.setflags(filename, False, True)
1210 self.wvfs.setflags(filename, False, True)
1212 else:
1211 else:
1213 self.wvfs.setflags(filename, False, False)
1212 self.wvfs.setflags(filename, False, False)
1214 return len(data)
1213 return len(data)
1215
1214
1216 def wwritedata(self, filename, data):
1215 def wwritedata(self, filename, data):
1217 return self._filter(self._decodefilterpats, filename, data)
1216 return self._filter(self._decodefilterpats, filename, data)
1218
1217
1219 def currenttransaction(self):
1218 def currenttransaction(self):
1220 """return the current transaction or None if non exists"""
1219 """return the current transaction or None if non exists"""
1221 if self._transref:
1220 if self._transref:
1222 tr = self._transref()
1221 tr = self._transref()
1223 else:
1222 else:
1224 tr = None
1223 tr = None
1225
1224
1226 if tr and tr.running():
1225 if tr and tr.running():
1227 return tr
1226 return tr
1228 return None
1227 return None
1229
1228
1230 def transaction(self, desc, report=None):
1229 def transaction(self, desc, report=None):
1231 if (self.ui.configbool('devel', 'all-warnings')
1230 if (self.ui.configbool('devel', 'all-warnings')
1232 or self.ui.configbool('devel', 'check-locks')):
1231 or self.ui.configbool('devel', 'check-locks')):
1233 if self._currentlock(self._lockref) is None:
1232 if self._currentlock(self._lockref) is None:
1234 raise error.ProgrammingError('transaction requires locking')
1233 raise error.ProgrammingError('transaction requires locking')
1235 tr = self.currenttransaction()
1234 tr = self.currenttransaction()
1236 if tr is not None:
1235 if tr is not None:
1237 return tr.nest(name=desc)
1236 return tr.nest(name=desc)
1238
1237
1239 # abort here if the journal already exists
1238 # abort here if the journal already exists
1240 if self.svfs.exists("journal"):
1239 if self.svfs.exists("journal"):
1241 raise error.RepoError(
1240 raise error.RepoError(
1242 _("abandoned transaction found"),
1241 _("abandoned transaction found"),
1243 hint=_("run 'hg recover' to clean up transaction"))
1242 hint=_("run 'hg recover' to clean up transaction"))
1244
1243
1245 idbase = "%.40f#%f" % (random.random(), time.time())
1244 idbase = "%.40f#%f" % (random.random(), time.time())
1246 ha = hex(hashlib.sha1(idbase).digest())
1245 ha = hex(hashlib.sha1(idbase).digest())
1247 txnid = 'TXN:' + ha
1246 txnid = 'TXN:' + ha
1248 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1247 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1249
1248
1250 self._writejournal(desc)
1249 self._writejournal(desc)
1251 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1250 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1252 if report:
1251 if report:
1253 rp = report
1252 rp = report
1254 else:
1253 else:
1255 rp = self.ui.warn
1254 rp = self.ui.warn
1256 vfsmap = {'plain': self.vfs} # root of .hg/
1255 vfsmap = {'plain': self.vfs} # root of .hg/
1257 # we must avoid cyclic reference between repo and transaction.
1256 # we must avoid cyclic reference between repo and transaction.
1258 reporef = weakref.ref(self)
1257 reporef = weakref.ref(self)
1259 # Code to track tag movement
1258 # Code to track tag movement
1260 #
1259 #
1261 # Since tags are all handled as file content, it is actually quite hard
1260 # Since tags are all handled as file content, it is actually quite hard
1262 # to track these movement from a code perspective. So we fallback to a
1261 # to track these movement from a code perspective. So we fallback to a
1263 # tracking at the repository level. One could envision to track changes
1262 # tracking at the repository level. One could envision to track changes
1264 # to the '.hgtags' file through changegroup apply but that fails to
1263 # to the '.hgtags' file through changegroup apply but that fails to
1265 # cope with case where transaction expose new heads without changegroup
1264 # cope with case where transaction expose new heads without changegroup
1266 # being involved (eg: phase movement).
1265 # being involved (eg: phase movement).
1267 #
1266 #
1268 # For now, We gate the feature behind a flag since this likely comes
1267 # For now, We gate the feature behind a flag since this likely comes
1269 # with performance impacts. The current code run more often than needed
1268 # with performance impacts. The current code run more often than needed
1270 # and do not use caches as much as it could. The current focus is on
1269 # and do not use caches as much as it could. The current focus is on
1271 # the behavior of the feature so we disable it by default. The flag
1270 # the behavior of the feature so we disable it by default. The flag
1272 # will be removed when we are happy with the performance impact.
1271 # will be removed when we are happy with the performance impact.
1273 #
1272 #
1274 # Once this feature is no longer experimental move the following
1273 # Once this feature is no longer experimental move the following
1275 # documentation to the appropriate help section:
1274 # documentation to the appropriate help section:
1276 #
1275 #
1277 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1276 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1278 # tags (new or changed or deleted tags). In addition the details of
1277 # tags (new or changed or deleted tags). In addition the details of
1279 # these changes are made available in a file at:
1278 # these changes are made available in a file at:
1280 # ``REPOROOT/.hg/changes/tags.changes``.
1279 # ``REPOROOT/.hg/changes/tags.changes``.
1281 # Make sure you check for HG_TAG_MOVED before reading that file as it
1280 # Make sure you check for HG_TAG_MOVED before reading that file as it
1282 # might exist from a previous transaction even if no tag were touched
1281 # might exist from a previous transaction even if no tag were touched
1283 # in this one. Changes are recorded in a line base format::
1282 # in this one. Changes are recorded in a line base format::
1284 #
1283 #
1285 # <action> <hex-node> <tag-name>\n
1284 # <action> <hex-node> <tag-name>\n
1286 #
1285 #
1287 # Actions are defined as follow:
1286 # Actions are defined as follow:
1288 # "-R": tag is removed,
1287 # "-R": tag is removed,
1289 # "+A": tag is added,
1288 # "+A": tag is added,
1290 # "-M": tag is moved (old value),
1289 # "-M": tag is moved (old value),
1291 # "+M": tag is moved (new value),
1290 # "+M": tag is moved (new value),
1292 tracktags = lambda x: None
1291 tracktags = lambda x: None
1293 # experimental config: experimental.hook-track-tags
1292 # experimental config: experimental.hook-track-tags
1294 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1293 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1295 if desc != 'strip' and shouldtracktags:
1294 if desc != 'strip' and shouldtracktags:
1296 oldheads = self.changelog.headrevs()
1295 oldheads = self.changelog.headrevs()
1297 def tracktags(tr2):
1296 def tracktags(tr2):
1298 repo = reporef()
1297 repo = reporef()
1299 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1298 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1300 newheads = repo.changelog.headrevs()
1299 newheads = repo.changelog.headrevs()
1301 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1300 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1302 # notes: we compare lists here.
1301 # notes: we compare lists here.
1303 # As we do it only once buiding set would not be cheaper
1302 # As we do it only once buiding set would not be cheaper
1304 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1303 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1305 if changes:
1304 if changes:
1306 tr2.hookargs['tag_moved'] = '1'
1305 tr2.hookargs['tag_moved'] = '1'
1307 with repo.vfs('changes/tags.changes', 'w',
1306 with repo.vfs('changes/tags.changes', 'w',
1308 atomictemp=True) as changesfile:
1307 atomictemp=True) as changesfile:
1309 # note: we do not register the file to the transaction
1308 # note: we do not register the file to the transaction
1310 # because we needs it to still exist on the transaction
1309 # because we needs it to still exist on the transaction
1311 # is close (for txnclose hooks)
1310 # is close (for txnclose hooks)
1312 tagsmod.writediff(changesfile, changes)
1311 tagsmod.writediff(changesfile, changes)
1313 def validate(tr2):
1312 def validate(tr2):
1314 """will run pre-closing hooks"""
1313 """will run pre-closing hooks"""
1315 # XXX the transaction API is a bit lacking here so we take a hacky
1314 # XXX the transaction API is a bit lacking here so we take a hacky
1316 # path for now
1315 # path for now
1317 #
1316 #
1318 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1317 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1319 # dict is copied before these run. In addition we needs the data
1318 # dict is copied before these run. In addition we needs the data
1320 # available to in memory hooks too.
1319 # available to in memory hooks too.
1321 #
1320 #
1322 # Moreover, we also need to make sure this runs before txnclose
1321 # Moreover, we also need to make sure this runs before txnclose
1323 # hooks and there is no "pending" mechanism that would execute
1322 # hooks and there is no "pending" mechanism that would execute
1324 # logic only if hooks are about to run.
1323 # logic only if hooks are about to run.
1325 #
1324 #
1326 # Fixing this limitation of the transaction is also needed to track
1325 # Fixing this limitation of the transaction is also needed to track
1327 # other families of changes (bookmarks, phases, obsolescence).
1326 # other families of changes (bookmarks, phases, obsolescence).
1328 #
1327 #
1329 # This will have to be fixed before we remove the experimental
1328 # This will have to be fixed before we remove the experimental
1330 # gating.
1329 # gating.
1331 tracktags(tr2)
1330 tracktags(tr2)
1332 repo = reporef()
1331 repo = reporef()
1333 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1332 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1334 scmutil.enforcesinglehead(repo, tr2, desc)
1333 scmutil.enforcesinglehead(repo, tr2, desc)
1335 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1334 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1336 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1335 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1337 args = tr.hookargs.copy()
1336 args = tr.hookargs.copy()
1338 args.update(bookmarks.preparehookargs(name, old, new))
1337 args.update(bookmarks.preparehookargs(name, old, new))
1339 repo.hook('pretxnclose-bookmark', throw=True,
1338 repo.hook('pretxnclose-bookmark', throw=True,
1340 txnname=desc,
1339 txnname=desc,
1341 **pycompat.strkwargs(args))
1340 **pycompat.strkwargs(args))
1342 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1341 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1343 cl = repo.unfiltered().changelog
1342 cl = repo.unfiltered().changelog
1344 for rev, (old, new) in tr.changes['phases'].items():
1343 for rev, (old, new) in tr.changes['phases'].items():
1345 args = tr.hookargs.copy()
1344 args = tr.hookargs.copy()
1346 node = hex(cl.node(rev))
1345 node = hex(cl.node(rev))
1347 args.update(phases.preparehookargs(node, old, new))
1346 args.update(phases.preparehookargs(node, old, new))
1348 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1347 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1349 **pycompat.strkwargs(args))
1348 **pycompat.strkwargs(args))
1350
1349
1351 repo.hook('pretxnclose', throw=True,
1350 repo.hook('pretxnclose', throw=True,
1352 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1351 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1353 def releasefn(tr, success):
1352 def releasefn(tr, success):
1354 repo = reporef()
1353 repo = reporef()
1355 if success:
1354 if success:
1356 # this should be explicitly invoked here, because
1355 # this should be explicitly invoked here, because
1357 # in-memory changes aren't written out at closing
1356 # in-memory changes aren't written out at closing
1358 # transaction, if tr.addfilegenerator (via
1357 # transaction, if tr.addfilegenerator (via
1359 # dirstate.write or so) isn't invoked while
1358 # dirstate.write or so) isn't invoked while
1360 # transaction running
1359 # transaction running
1361 repo.dirstate.write(None)
1360 repo.dirstate.write(None)
1362 else:
1361 else:
1363 # discard all changes (including ones already written
1362 # discard all changes (including ones already written
1364 # out) in this transaction
1363 # out) in this transaction
1365 repo.dirstate.restorebackup(None, 'journal.dirstate')
1364 repo.dirstate.restorebackup(None, 'journal.dirstate')
1366
1365
1367 repo.invalidate(clearfilecache=True)
1366 repo.invalidate(clearfilecache=True)
1368
1367
1369 tr = transaction.transaction(rp, self.svfs, vfsmap,
1368 tr = transaction.transaction(rp, self.svfs, vfsmap,
1370 "journal",
1369 "journal",
1371 "undo",
1370 "undo",
1372 aftertrans(renames),
1371 aftertrans(renames),
1373 self.store.createmode,
1372 self.store.createmode,
1374 validator=validate,
1373 validator=validate,
1375 releasefn=releasefn,
1374 releasefn=releasefn,
1376 checkambigfiles=_cachedfiles,
1375 checkambigfiles=_cachedfiles,
1377 name=desc)
1376 name=desc)
1378 tr.changes['revs'] = xrange(0, 0)
1377 tr.changes['revs'] = xrange(0, 0)
1379 tr.changes['obsmarkers'] = set()
1378 tr.changes['obsmarkers'] = set()
1380 tr.changes['phases'] = {}
1379 tr.changes['phases'] = {}
1381 tr.changes['bookmarks'] = {}
1380 tr.changes['bookmarks'] = {}
1382
1381
1383 tr.hookargs['txnid'] = txnid
1382 tr.hookargs['txnid'] = txnid
1384 # note: writing the fncache only during finalize mean that the file is
1383 # note: writing the fncache only during finalize mean that the file is
1385 # outdated when running hooks. As fncache is used for streaming clone,
1384 # outdated when running hooks. As fncache is used for streaming clone,
1386 # this is not expected to break anything that happen during the hooks.
1385 # this is not expected to break anything that happen during the hooks.
1387 tr.addfinalize('flush-fncache', self.store.write)
1386 tr.addfinalize('flush-fncache', self.store.write)
1388 def txnclosehook(tr2):
1387 def txnclosehook(tr2):
1389 """To be run if transaction is successful, will schedule a hook run
1388 """To be run if transaction is successful, will schedule a hook run
1390 """
1389 """
1391 # Don't reference tr2 in hook() so we don't hold a reference.
1390 # Don't reference tr2 in hook() so we don't hold a reference.
1392 # This reduces memory consumption when there are multiple
1391 # This reduces memory consumption when there are multiple
1393 # transactions per lock. This can likely go away if issue5045
1392 # transactions per lock. This can likely go away if issue5045
1394 # fixes the function accumulation.
1393 # fixes the function accumulation.
1395 hookargs = tr2.hookargs
1394 hookargs = tr2.hookargs
1396
1395
1397 def hookfunc():
1396 def hookfunc():
1398 repo = reporef()
1397 repo = reporef()
1399 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1398 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1400 bmchanges = sorted(tr.changes['bookmarks'].items())
1399 bmchanges = sorted(tr.changes['bookmarks'].items())
1401 for name, (old, new) in bmchanges:
1400 for name, (old, new) in bmchanges:
1402 args = tr.hookargs.copy()
1401 args = tr.hookargs.copy()
1403 args.update(bookmarks.preparehookargs(name, old, new))
1402 args.update(bookmarks.preparehookargs(name, old, new))
1404 repo.hook('txnclose-bookmark', throw=False,
1403 repo.hook('txnclose-bookmark', throw=False,
1405 txnname=desc, **pycompat.strkwargs(args))
1404 txnname=desc, **pycompat.strkwargs(args))
1406
1405
1407 if hook.hashook(repo.ui, 'txnclose-phase'):
1406 if hook.hashook(repo.ui, 'txnclose-phase'):
1408 cl = repo.unfiltered().changelog
1407 cl = repo.unfiltered().changelog
1409 phasemv = sorted(tr.changes['phases'].items())
1408 phasemv = sorted(tr.changes['phases'].items())
1410 for rev, (old, new) in phasemv:
1409 for rev, (old, new) in phasemv:
1411 args = tr.hookargs.copy()
1410 args = tr.hookargs.copy()
1412 node = hex(cl.node(rev))
1411 node = hex(cl.node(rev))
1413 args.update(phases.preparehookargs(node, old, new))
1412 args.update(phases.preparehookargs(node, old, new))
1414 repo.hook('txnclose-phase', throw=False, txnname=desc,
1413 repo.hook('txnclose-phase', throw=False, txnname=desc,
1415 **pycompat.strkwargs(args))
1414 **pycompat.strkwargs(args))
1416
1415
1417 repo.hook('txnclose', throw=False, txnname=desc,
1416 repo.hook('txnclose', throw=False, txnname=desc,
1418 **pycompat.strkwargs(hookargs))
1417 **pycompat.strkwargs(hookargs))
1419 reporef()._afterlock(hookfunc)
1418 reporef()._afterlock(hookfunc)
1420 tr.addfinalize('txnclose-hook', txnclosehook)
1419 tr.addfinalize('txnclose-hook', txnclosehook)
1421 # Include a leading "-" to make it happen before the transaction summary
1420 # Include a leading "-" to make it happen before the transaction summary
1422 # reports registered via scmutil.registersummarycallback() whose names
1421 # reports registered via scmutil.registersummarycallback() whose names
1423 # are 00-txnreport etc. That way, the caches will be warm when the
1422 # are 00-txnreport etc. That way, the caches will be warm when the
1424 # callbacks run.
1423 # callbacks run.
1425 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1424 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1426 def txnaborthook(tr2):
1425 def txnaborthook(tr2):
1427 """To be run if transaction is aborted
1426 """To be run if transaction is aborted
1428 """
1427 """
1429 reporef().hook('txnabort', throw=False, txnname=desc,
1428 reporef().hook('txnabort', throw=False, txnname=desc,
1430 **pycompat.strkwargs(tr2.hookargs))
1429 **pycompat.strkwargs(tr2.hookargs))
1431 tr.addabort('txnabort-hook', txnaborthook)
1430 tr.addabort('txnabort-hook', txnaborthook)
1432 # avoid eager cache invalidation. in-memory data should be identical
1431 # avoid eager cache invalidation. in-memory data should be identical
1433 # to stored data if transaction has no error.
1432 # to stored data if transaction has no error.
1434 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1433 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1435 self._transref = weakref.ref(tr)
1434 self._transref = weakref.ref(tr)
1436 scmutil.registersummarycallback(self, tr, desc)
1435 scmutil.registersummarycallback(self, tr, desc)
1437 return tr
1436 return tr
1438
1437
1439 def _journalfiles(self):
1438 def _journalfiles(self):
1440 return ((self.svfs, 'journal'),
1439 return ((self.svfs, 'journal'),
1441 (self.vfs, 'journal.dirstate'),
1440 (self.vfs, 'journal.dirstate'),
1442 (self.vfs, 'journal.branch'),
1441 (self.vfs, 'journal.branch'),
1443 (self.vfs, 'journal.desc'),
1442 (self.vfs, 'journal.desc'),
1444 (self.vfs, 'journal.bookmarks'),
1443 (self.vfs, 'journal.bookmarks'),
1445 (self.svfs, 'journal.phaseroots'))
1444 (self.svfs, 'journal.phaseroots'))
1446
1445
1447 def undofiles(self):
1446 def undofiles(self):
1448 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1447 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1449
1448
1450 @unfilteredmethod
1449 @unfilteredmethod
1451 def _writejournal(self, desc):
1450 def _writejournal(self, desc):
1452 self.dirstate.savebackup(None, 'journal.dirstate')
1451 self.dirstate.savebackup(None, 'journal.dirstate')
1453 self.vfs.write("journal.branch",
1452 self.vfs.write("journal.branch",
1454 encoding.fromlocal(self.dirstate.branch()))
1453 encoding.fromlocal(self.dirstate.branch()))
1455 self.vfs.write("journal.desc",
1454 self.vfs.write("journal.desc",
1456 "%d\n%s\n" % (len(self), desc))
1455 "%d\n%s\n" % (len(self), desc))
1457 self.vfs.write("journal.bookmarks",
1456 self.vfs.write("journal.bookmarks",
1458 self.vfs.tryread("bookmarks"))
1457 self.vfs.tryread("bookmarks"))
1459 self.svfs.write("journal.phaseroots",
1458 self.svfs.write("journal.phaseroots",
1460 self.svfs.tryread("phaseroots"))
1459 self.svfs.tryread("phaseroots"))
1461
1460
1462 def recover(self):
1461 def recover(self):
1463 with self.lock():
1462 with self.lock():
1464 if self.svfs.exists("journal"):
1463 if self.svfs.exists("journal"):
1465 self.ui.status(_("rolling back interrupted transaction\n"))
1464 self.ui.status(_("rolling back interrupted transaction\n"))
1466 vfsmap = {'': self.svfs,
1465 vfsmap = {'': self.svfs,
1467 'plain': self.vfs,}
1466 'plain': self.vfs,}
1468 transaction.rollback(self.svfs, vfsmap, "journal",
1467 transaction.rollback(self.svfs, vfsmap, "journal",
1469 self.ui.warn,
1468 self.ui.warn,
1470 checkambigfiles=_cachedfiles)
1469 checkambigfiles=_cachedfiles)
1471 self.invalidate()
1470 self.invalidate()
1472 return True
1471 return True
1473 else:
1472 else:
1474 self.ui.warn(_("no interrupted transaction available\n"))
1473 self.ui.warn(_("no interrupted transaction available\n"))
1475 return False
1474 return False
1476
1475
1477 def rollback(self, dryrun=False, force=False):
1476 def rollback(self, dryrun=False, force=False):
1478 wlock = lock = dsguard = None
1477 wlock = lock = dsguard = None
1479 try:
1478 try:
1480 wlock = self.wlock()
1479 wlock = self.wlock()
1481 lock = self.lock()
1480 lock = self.lock()
1482 if self.svfs.exists("undo"):
1481 if self.svfs.exists("undo"):
1483 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1482 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1484
1483
1485 return self._rollback(dryrun, force, dsguard)
1484 return self._rollback(dryrun, force, dsguard)
1486 else:
1485 else:
1487 self.ui.warn(_("no rollback information available\n"))
1486 self.ui.warn(_("no rollback information available\n"))
1488 return 1
1487 return 1
1489 finally:
1488 finally:
1490 release(dsguard, lock, wlock)
1489 release(dsguard, lock, wlock)
1491
1490
1492 @unfilteredmethod # Until we get smarter cache management
1491 @unfilteredmethod # Until we get smarter cache management
1493 def _rollback(self, dryrun, force, dsguard):
1492 def _rollback(self, dryrun, force, dsguard):
1494 ui = self.ui
1493 ui = self.ui
1495 try:
1494 try:
1496 args = self.vfs.read('undo.desc').splitlines()
1495 args = self.vfs.read('undo.desc').splitlines()
1497 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1496 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1498 if len(args) >= 3:
1497 if len(args) >= 3:
1499 detail = args[2]
1498 detail = args[2]
1500 oldtip = oldlen - 1
1499 oldtip = oldlen - 1
1501
1500
1502 if detail and ui.verbose:
1501 if detail and ui.verbose:
1503 msg = (_('repository tip rolled back to revision %d'
1502 msg = (_('repository tip rolled back to revision %d'
1504 ' (undo %s: %s)\n')
1503 ' (undo %s: %s)\n')
1505 % (oldtip, desc, detail))
1504 % (oldtip, desc, detail))
1506 else:
1505 else:
1507 msg = (_('repository tip rolled back to revision %d'
1506 msg = (_('repository tip rolled back to revision %d'
1508 ' (undo %s)\n')
1507 ' (undo %s)\n')
1509 % (oldtip, desc))
1508 % (oldtip, desc))
1510 except IOError:
1509 except IOError:
1511 msg = _('rolling back unknown transaction\n')
1510 msg = _('rolling back unknown transaction\n')
1512 desc = None
1511 desc = None
1513
1512
1514 if not force and self['.'] != self['tip'] and desc == 'commit':
1513 if not force and self['.'] != self['tip'] and desc == 'commit':
1515 raise error.Abort(
1514 raise error.Abort(
1516 _('rollback of last commit while not checked out '
1515 _('rollback of last commit while not checked out '
1517 'may lose data'), hint=_('use -f to force'))
1516 'may lose data'), hint=_('use -f to force'))
1518
1517
1519 ui.status(msg)
1518 ui.status(msg)
1520 if dryrun:
1519 if dryrun:
1521 return 0
1520 return 0
1522
1521
1523 parents = self.dirstate.parents()
1522 parents = self.dirstate.parents()
1524 self.destroying()
1523 self.destroying()
1525 vfsmap = {'plain': self.vfs, '': self.svfs}
1524 vfsmap = {'plain': self.vfs, '': self.svfs}
1526 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1525 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1527 checkambigfiles=_cachedfiles)
1526 checkambigfiles=_cachedfiles)
1528 if self.vfs.exists('undo.bookmarks'):
1527 if self.vfs.exists('undo.bookmarks'):
1529 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1528 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1530 if self.svfs.exists('undo.phaseroots'):
1529 if self.svfs.exists('undo.phaseroots'):
1531 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1530 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1532 self.invalidate()
1531 self.invalidate()
1533
1532
1534 parentgone = (parents[0] not in self.changelog.nodemap or
1533 parentgone = (parents[0] not in self.changelog.nodemap or
1535 parents[1] not in self.changelog.nodemap)
1534 parents[1] not in self.changelog.nodemap)
1536 if parentgone:
1535 if parentgone:
1537 # prevent dirstateguard from overwriting already restored one
1536 # prevent dirstateguard from overwriting already restored one
1538 dsguard.close()
1537 dsguard.close()
1539
1538
1540 self.dirstate.restorebackup(None, 'undo.dirstate')
1539 self.dirstate.restorebackup(None, 'undo.dirstate')
1541 try:
1540 try:
1542 branch = self.vfs.read('undo.branch')
1541 branch = self.vfs.read('undo.branch')
1543 self.dirstate.setbranch(encoding.tolocal(branch))
1542 self.dirstate.setbranch(encoding.tolocal(branch))
1544 except IOError:
1543 except IOError:
1545 ui.warn(_('named branch could not be reset: '
1544 ui.warn(_('named branch could not be reset: '
1546 'current branch is still \'%s\'\n')
1545 'current branch is still \'%s\'\n')
1547 % self.dirstate.branch())
1546 % self.dirstate.branch())
1548
1547
1549 parents = tuple([p.rev() for p in self[None].parents()])
1548 parents = tuple([p.rev() for p in self[None].parents()])
1550 if len(parents) > 1:
1549 if len(parents) > 1:
1551 ui.status(_('working directory now based on '
1550 ui.status(_('working directory now based on '
1552 'revisions %d and %d\n') % parents)
1551 'revisions %d and %d\n') % parents)
1553 else:
1552 else:
1554 ui.status(_('working directory now based on '
1553 ui.status(_('working directory now based on '
1555 'revision %d\n') % parents)
1554 'revision %d\n') % parents)
1556 mergemod.mergestate.clean(self, self['.'].node())
1555 mergemod.mergestate.clean(self, self['.'].node())
1557
1556
1558 # TODO: if we know which new heads may result from this rollback, pass
1557 # TODO: if we know which new heads may result from this rollback, pass
1559 # them to destroy(), which will prevent the branchhead cache from being
1558 # them to destroy(), which will prevent the branchhead cache from being
1560 # invalidated.
1559 # invalidated.
1561 self.destroyed()
1560 self.destroyed()
1562 return 0
1561 return 0
1563
1562
1564 def _buildcacheupdater(self, newtransaction):
1563 def _buildcacheupdater(self, newtransaction):
1565 """called during transaction to build the callback updating cache
1564 """called during transaction to build the callback updating cache
1566
1565
1567 Lives on the repository to help extension who might want to augment
1566 Lives on the repository to help extension who might want to augment
1568 this logic. For this purpose, the created transaction is passed to the
1567 this logic. For this purpose, the created transaction is passed to the
1569 method.
1568 method.
1570 """
1569 """
1571 # we must avoid cyclic reference between repo and transaction.
1570 # we must avoid cyclic reference between repo and transaction.
1572 reporef = weakref.ref(self)
1571 reporef = weakref.ref(self)
1573 def updater(tr):
1572 def updater(tr):
1574 repo = reporef()
1573 repo = reporef()
1575 repo.updatecaches(tr)
1574 repo.updatecaches(tr)
1576 return updater
1575 return updater
1577
1576
1578 @unfilteredmethod
1577 @unfilteredmethod
1579 def updatecaches(self, tr=None, full=False):
1578 def updatecaches(self, tr=None, full=False):
1580 """warm appropriate caches
1579 """warm appropriate caches
1581
1580
1582 If this function is called after a transaction closed. The transaction
1581 If this function is called after a transaction closed. The transaction
1583 will be available in the 'tr' argument. This can be used to selectively
1582 will be available in the 'tr' argument. This can be used to selectively
1584 update caches relevant to the changes in that transaction.
1583 update caches relevant to the changes in that transaction.
1585
1584
1586 If 'full' is set, make sure all caches the function knows about have
1585 If 'full' is set, make sure all caches the function knows about have
1587 up-to-date data. Even the ones usually loaded more lazily.
1586 up-to-date data. Even the ones usually loaded more lazily.
1588 """
1587 """
1589 if tr is not None and tr.hookargs.get('source') == 'strip':
1588 if tr is not None and tr.hookargs.get('source') == 'strip':
1590 # During strip, many caches are invalid but
1589 # During strip, many caches are invalid but
1591 # later call to `destroyed` will refresh them.
1590 # later call to `destroyed` will refresh them.
1592 return
1591 return
1593
1592
1594 if tr is None or tr.changes['revs']:
1593 if tr is None or tr.changes['revs']:
1595 # updating the unfiltered branchmap should refresh all the others,
1594 # updating the unfiltered branchmap should refresh all the others,
1596 self.ui.debug('updating the branch cache\n')
1595 self.ui.debug('updating the branch cache\n')
1597 branchmap.updatecache(self.filtered('served'))
1596 branchmap.updatecache(self.filtered('served'))
1598
1597
1599 if full:
1598 if full:
1600 rbc = self.revbranchcache()
1599 rbc = self.revbranchcache()
1601 for r in self.changelog:
1600 for r in self.changelog:
1602 rbc.branchinfo(r)
1601 rbc.branchinfo(r)
1603 rbc.write()
1602 rbc.write()
1604
1603
1605 def invalidatecaches(self):
1604 def invalidatecaches(self):
1606
1605
1607 if '_tagscache' in vars(self):
1606 if '_tagscache' in vars(self):
1608 # can't use delattr on proxy
1607 # can't use delattr on proxy
1609 del self.__dict__['_tagscache']
1608 del self.__dict__['_tagscache']
1610
1609
1611 self.unfiltered()._branchcaches.clear()
1610 self.unfiltered()._branchcaches.clear()
1612 self.invalidatevolatilesets()
1611 self.invalidatevolatilesets()
1613 self._sparsesignaturecache.clear()
1612 self._sparsesignaturecache.clear()
1614
1613
1615 def invalidatevolatilesets(self):
1614 def invalidatevolatilesets(self):
1616 self.filteredrevcache.clear()
1615 self.filteredrevcache.clear()
1617 obsolete.clearobscaches(self)
1616 obsolete.clearobscaches(self)
1618
1617
1619 def invalidatedirstate(self):
1618 def invalidatedirstate(self):
1620 '''Invalidates the dirstate, causing the next call to dirstate
1619 '''Invalidates the dirstate, causing the next call to dirstate
1621 to check if it was modified since the last time it was read,
1620 to check if it was modified since the last time it was read,
1622 rereading it if it has.
1621 rereading it if it has.
1623
1622
1624 This is different to dirstate.invalidate() that it doesn't always
1623 This is different to dirstate.invalidate() that it doesn't always
1625 rereads the dirstate. Use dirstate.invalidate() if you want to
1624 rereads the dirstate. Use dirstate.invalidate() if you want to
1626 explicitly read the dirstate again (i.e. restoring it to a previous
1625 explicitly read the dirstate again (i.e. restoring it to a previous
1627 known good state).'''
1626 known good state).'''
1628 if hasunfilteredcache(self, 'dirstate'):
1627 if hasunfilteredcache(self, 'dirstate'):
1629 for k in self.dirstate._filecache:
1628 for k in self.dirstate._filecache:
1630 try:
1629 try:
1631 delattr(self.dirstate, k)
1630 delattr(self.dirstate, k)
1632 except AttributeError:
1631 except AttributeError:
1633 pass
1632 pass
1634 delattr(self.unfiltered(), 'dirstate')
1633 delattr(self.unfiltered(), 'dirstate')
1635
1634
1636 def invalidate(self, clearfilecache=False):
1635 def invalidate(self, clearfilecache=False):
1637 '''Invalidates both store and non-store parts other than dirstate
1636 '''Invalidates both store and non-store parts other than dirstate
1638
1637
1639 If a transaction is running, invalidation of store is omitted,
1638 If a transaction is running, invalidation of store is omitted,
1640 because discarding in-memory changes might cause inconsistency
1639 because discarding in-memory changes might cause inconsistency
1641 (e.g. incomplete fncache causes unintentional failure, but
1640 (e.g. incomplete fncache causes unintentional failure, but
1642 redundant one doesn't).
1641 redundant one doesn't).
1643 '''
1642 '''
1644 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1643 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1645 for k in list(self._filecache.keys()):
1644 for k in list(self._filecache.keys()):
1646 # dirstate is invalidated separately in invalidatedirstate()
1645 # dirstate is invalidated separately in invalidatedirstate()
1647 if k == 'dirstate':
1646 if k == 'dirstate':
1648 continue
1647 continue
1649 if (k == 'changelog' and
1648 if (k == 'changelog' and
1650 self.currenttransaction() and
1649 self.currenttransaction() and
1651 self.changelog._delayed):
1650 self.changelog._delayed):
1652 # The changelog object may store unwritten revisions. We don't
1651 # The changelog object may store unwritten revisions. We don't
1653 # want to lose them.
1652 # want to lose them.
1654 # TODO: Solve the problem instead of working around it.
1653 # TODO: Solve the problem instead of working around it.
1655 continue
1654 continue
1656
1655
1657 if clearfilecache:
1656 if clearfilecache:
1658 del self._filecache[k]
1657 del self._filecache[k]
1659 try:
1658 try:
1660 delattr(unfiltered, k)
1659 delattr(unfiltered, k)
1661 except AttributeError:
1660 except AttributeError:
1662 pass
1661 pass
1663 self.invalidatecaches()
1662 self.invalidatecaches()
1664 if not self.currenttransaction():
1663 if not self.currenttransaction():
1665 # TODO: Changing contents of store outside transaction
1664 # TODO: Changing contents of store outside transaction
1666 # causes inconsistency. We should make in-memory store
1665 # causes inconsistency. We should make in-memory store
1667 # changes detectable, and abort if changed.
1666 # changes detectable, and abort if changed.
1668 self.store.invalidatecaches()
1667 self.store.invalidatecaches()
1669
1668
1670 def invalidateall(self):
1669 def invalidateall(self):
1671 '''Fully invalidates both store and non-store parts, causing the
1670 '''Fully invalidates both store and non-store parts, causing the
1672 subsequent operation to reread any outside changes.'''
1671 subsequent operation to reread any outside changes.'''
1673 # extension should hook this to invalidate its caches
1672 # extension should hook this to invalidate its caches
1674 self.invalidate()
1673 self.invalidate()
1675 self.invalidatedirstate()
1674 self.invalidatedirstate()
1676
1675
1677 @unfilteredmethod
1676 @unfilteredmethod
1678 def _refreshfilecachestats(self, tr):
1677 def _refreshfilecachestats(self, tr):
1679 """Reload stats of cached files so that they are flagged as valid"""
1678 """Reload stats of cached files so that they are flagged as valid"""
1680 for k, ce in self._filecache.items():
1679 for k, ce in self._filecache.items():
1681 k = pycompat.sysstr(k)
1680 k = pycompat.sysstr(k)
1682 if k == r'dirstate' or k not in self.__dict__:
1681 if k == r'dirstate' or k not in self.__dict__:
1683 continue
1682 continue
1684 ce.refresh()
1683 ce.refresh()
1685
1684
1686 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1685 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1687 inheritchecker=None, parentenvvar=None):
1686 inheritchecker=None, parentenvvar=None):
1688 parentlock = None
1687 parentlock = None
1689 # the contents of parentenvvar are used by the underlying lock to
1688 # the contents of parentenvvar are used by the underlying lock to
1690 # determine whether it can be inherited
1689 # determine whether it can be inherited
1691 if parentenvvar is not None:
1690 if parentenvvar is not None:
1692 parentlock = encoding.environ.get(parentenvvar)
1691 parentlock = encoding.environ.get(parentenvvar)
1693
1692
1694 timeout = 0
1693 timeout = 0
1695 warntimeout = 0
1694 warntimeout = 0
1696 if wait:
1695 if wait:
1697 timeout = self.ui.configint("ui", "timeout")
1696 timeout = self.ui.configint("ui", "timeout")
1698 warntimeout = self.ui.configint("ui", "timeout.warn")
1697 warntimeout = self.ui.configint("ui", "timeout.warn")
1699
1698
1700 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1699 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1701 releasefn=releasefn,
1700 releasefn=releasefn,
1702 acquirefn=acquirefn, desc=desc,
1701 acquirefn=acquirefn, desc=desc,
1703 inheritchecker=inheritchecker,
1702 inheritchecker=inheritchecker,
1704 parentlock=parentlock)
1703 parentlock=parentlock)
1705 return l
1704 return l
1706
1705
1707 def _afterlock(self, callback):
1706 def _afterlock(self, callback):
1708 """add a callback to be run when the repository is fully unlocked
1707 """add a callback to be run when the repository is fully unlocked
1709
1708
1710 The callback will be executed when the outermost lock is released
1709 The callback will be executed when the outermost lock is released
1711 (with wlock being higher level than 'lock')."""
1710 (with wlock being higher level than 'lock')."""
1712 for ref in (self._wlockref, self._lockref):
1711 for ref in (self._wlockref, self._lockref):
1713 l = ref and ref()
1712 l = ref and ref()
1714 if l and l.held:
1713 if l and l.held:
1715 l.postrelease.append(callback)
1714 l.postrelease.append(callback)
1716 break
1715 break
1717 else: # no lock have been found.
1716 else: # no lock have been found.
1718 callback()
1717 callback()
1719
1718
1720 def lock(self, wait=True):
1719 def lock(self, wait=True):
1721 '''Lock the repository store (.hg/store) and return a weak reference
1720 '''Lock the repository store (.hg/store) and return a weak reference
1722 to the lock. Use this before modifying the store (e.g. committing or
1721 to the lock. Use this before modifying the store (e.g. committing or
1723 stripping). If you are opening a transaction, get a lock as well.)
1722 stripping). If you are opening a transaction, get a lock as well.)
1724
1723
1725 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1724 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1726 'wlock' first to avoid a dead-lock hazard.'''
1725 'wlock' first to avoid a dead-lock hazard.'''
1727 l = self._currentlock(self._lockref)
1726 l = self._currentlock(self._lockref)
1728 if l is not None:
1727 if l is not None:
1729 l.lock()
1728 l.lock()
1730 return l
1729 return l
1731
1730
1732 l = self._lock(self.svfs, "lock", wait, None,
1731 l = self._lock(self.svfs, "lock", wait, None,
1733 self.invalidate, _('repository %s') % self.origroot)
1732 self.invalidate, _('repository %s') % self.origroot)
1734 self._lockref = weakref.ref(l)
1733 self._lockref = weakref.ref(l)
1735 return l
1734 return l
1736
1735
1737 def _wlockchecktransaction(self):
1736 def _wlockchecktransaction(self):
1738 if self.currenttransaction() is not None:
1737 if self.currenttransaction() is not None:
1739 raise error.LockInheritanceContractViolation(
1738 raise error.LockInheritanceContractViolation(
1740 'wlock cannot be inherited in the middle of a transaction')
1739 'wlock cannot be inherited in the middle of a transaction')
1741
1740
1742 def wlock(self, wait=True):
1741 def wlock(self, wait=True):
1743 '''Lock the non-store parts of the repository (everything under
1742 '''Lock the non-store parts of the repository (everything under
1744 .hg except .hg/store) and return a weak reference to the lock.
1743 .hg except .hg/store) and return a weak reference to the lock.
1745
1744
1746 Use this before modifying files in .hg.
1745 Use this before modifying files in .hg.
1747
1746
1748 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1747 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1749 'wlock' first to avoid a dead-lock hazard.'''
1748 'wlock' first to avoid a dead-lock hazard.'''
1750 l = self._wlockref and self._wlockref()
1749 l = self._wlockref and self._wlockref()
1751 if l is not None and l.held:
1750 if l is not None and l.held:
1752 l.lock()
1751 l.lock()
1753 return l
1752 return l
1754
1753
1755 # We do not need to check for non-waiting lock acquisition. Such
1754 # We do not need to check for non-waiting lock acquisition. Such
1756 # acquisition would not cause dead-lock as they would just fail.
1755 # acquisition would not cause dead-lock as they would just fail.
1757 if wait and (self.ui.configbool('devel', 'all-warnings')
1756 if wait and (self.ui.configbool('devel', 'all-warnings')
1758 or self.ui.configbool('devel', 'check-locks')):
1757 or self.ui.configbool('devel', 'check-locks')):
1759 if self._currentlock(self._lockref) is not None:
1758 if self._currentlock(self._lockref) is not None:
1760 self.ui.develwarn('"wlock" acquired after "lock"')
1759 self.ui.develwarn('"wlock" acquired after "lock"')
1761
1760
1762 def unlock():
1761 def unlock():
1763 if self.dirstate.pendingparentchange():
1762 if self.dirstate.pendingparentchange():
1764 self.dirstate.invalidate()
1763 self.dirstate.invalidate()
1765 else:
1764 else:
1766 self.dirstate.write(None)
1765 self.dirstate.write(None)
1767
1766
1768 self._filecache['dirstate'].refresh()
1767 self._filecache['dirstate'].refresh()
1769
1768
1770 l = self._lock(self.vfs, "wlock", wait, unlock,
1769 l = self._lock(self.vfs, "wlock", wait, unlock,
1771 self.invalidatedirstate, _('working directory of %s') %
1770 self.invalidatedirstate, _('working directory of %s') %
1772 self.origroot,
1771 self.origroot,
1773 inheritchecker=self._wlockchecktransaction,
1772 inheritchecker=self._wlockchecktransaction,
1774 parentenvvar='HG_WLOCK_LOCKER')
1773 parentenvvar='HG_WLOCK_LOCKER')
1775 self._wlockref = weakref.ref(l)
1774 self._wlockref = weakref.ref(l)
1776 return l
1775 return l
1777
1776
1778 def _currentlock(self, lockref):
1777 def _currentlock(self, lockref):
1779 """Returns the lock if it's held, or None if it's not."""
1778 """Returns the lock if it's held, or None if it's not."""
1780 if lockref is None:
1779 if lockref is None:
1781 return None
1780 return None
1782 l = lockref()
1781 l = lockref()
1783 if l is None or not l.held:
1782 if l is None or not l.held:
1784 return None
1783 return None
1785 return l
1784 return l
1786
1785
1787 def currentwlock(self):
1786 def currentwlock(self):
1788 """Returns the wlock if it's held, or None if it's not."""
1787 """Returns the wlock if it's held, or None if it's not."""
1789 return self._currentlock(self._wlockref)
1788 return self._currentlock(self._wlockref)
1790
1789
1791 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1790 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1792 """
1791 """
1793 commit an individual file as part of a larger transaction
1792 commit an individual file as part of a larger transaction
1794 """
1793 """
1795
1794
1796 fname = fctx.path()
1795 fname = fctx.path()
1797 fparent1 = manifest1.get(fname, nullid)
1796 fparent1 = manifest1.get(fname, nullid)
1798 fparent2 = manifest2.get(fname, nullid)
1797 fparent2 = manifest2.get(fname, nullid)
1799 if isinstance(fctx, context.filectx):
1798 if isinstance(fctx, context.filectx):
1800 node = fctx.filenode()
1799 node = fctx.filenode()
1801 if node in [fparent1, fparent2]:
1800 if node in [fparent1, fparent2]:
1802 self.ui.debug('reusing %s filelog entry\n' % fname)
1801 self.ui.debug('reusing %s filelog entry\n' % fname)
1803 if manifest1.flags(fname) != fctx.flags():
1802 if manifest1.flags(fname) != fctx.flags():
1804 changelist.append(fname)
1803 changelist.append(fname)
1805 return node
1804 return node
1806
1805
1807 flog = self.file(fname)
1806 flog = self.file(fname)
1808 meta = {}
1807 meta = {}
1809 copy = fctx.renamed()
1808 copy = fctx.renamed()
1810 if copy and copy[0] != fname:
1809 if copy and copy[0] != fname:
1811 # Mark the new revision of this file as a copy of another
1810 # Mark the new revision of this file as a copy of another
1812 # file. This copy data will effectively act as a parent
1811 # file. This copy data will effectively act as a parent
1813 # of this new revision. If this is a merge, the first
1812 # of this new revision. If this is a merge, the first
1814 # parent will be the nullid (meaning "look up the copy data")
1813 # parent will be the nullid (meaning "look up the copy data")
1815 # and the second one will be the other parent. For example:
1814 # and the second one will be the other parent. For example:
1816 #
1815 #
1817 # 0 --- 1 --- 3 rev1 changes file foo
1816 # 0 --- 1 --- 3 rev1 changes file foo
1818 # \ / rev2 renames foo to bar and changes it
1817 # \ / rev2 renames foo to bar and changes it
1819 # \- 2 -/ rev3 should have bar with all changes and
1818 # \- 2 -/ rev3 should have bar with all changes and
1820 # should record that bar descends from
1819 # should record that bar descends from
1821 # bar in rev2 and foo in rev1
1820 # bar in rev2 and foo in rev1
1822 #
1821 #
1823 # this allows this merge to succeed:
1822 # this allows this merge to succeed:
1824 #
1823 #
1825 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1824 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1826 # \ / merging rev3 and rev4 should use bar@rev2
1825 # \ / merging rev3 and rev4 should use bar@rev2
1827 # \- 2 --- 4 as the merge base
1826 # \- 2 --- 4 as the merge base
1828 #
1827 #
1829
1828
1830 cfname = copy[0]
1829 cfname = copy[0]
1831 crev = manifest1.get(cfname)
1830 crev = manifest1.get(cfname)
1832 newfparent = fparent2
1831 newfparent = fparent2
1833
1832
1834 if manifest2: # branch merge
1833 if manifest2: # branch merge
1835 if fparent2 == nullid or crev is None: # copied on remote side
1834 if fparent2 == nullid or crev is None: # copied on remote side
1836 if cfname in manifest2:
1835 if cfname in manifest2:
1837 crev = manifest2[cfname]
1836 crev = manifest2[cfname]
1838 newfparent = fparent1
1837 newfparent = fparent1
1839
1838
1840 # Here, we used to search backwards through history to try to find
1839 # Here, we used to search backwards through history to try to find
1841 # where the file copy came from if the source of a copy was not in
1840 # where the file copy came from if the source of a copy was not in
1842 # the parent directory. However, this doesn't actually make sense to
1841 # the parent directory. However, this doesn't actually make sense to
1843 # do (what does a copy from something not in your working copy even
1842 # do (what does a copy from something not in your working copy even
1844 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1843 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1845 # the user that copy information was dropped, so if they didn't
1844 # the user that copy information was dropped, so if they didn't
1846 # expect this outcome it can be fixed, but this is the correct
1845 # expect this outcome it can be fixed, but this is the correct
1847 # behavior in this circumstance.
1846 # behavior in this circumstance.
1848
1847
1849 if crev:
1848 if crev:
1850 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1849 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1851 meta["copy"] = cfname
1850 meta["copy"] = cfname
1852 meta["copyrev"] = hex(crev)
1851 meta["copyrev"] = hex(crev)
1853 fparent1, fparent2 = nullid, newfparent
1852 fparent1, fparent2 = nullid, newfparent
1854 else:
1853 else:
1855 self.ui.warn(_("warning: can't find ancestor for '%s' "
1854 self.ui.warn(_("warning: can't find ancestor for '%s' "
1856 "copied from '%s'!\n") % (fname, cfname))
1855 "copied from '%s'!\n") % (fname, cfname))
1857
1856
1858 elif fparent1 == nullid:
1857 elif fparent1 == nullid:
1859 fparent1, fparent2 = fparent2, nullid
1858 fparent1, fparent2 = fparent2, nullid
1860 elif fparent2 != nullid:
1859 elif fparent2 != nullid:
1861 # is one parent an ancestor of the other?
1860 # is one parent an ancestor of the other?
1862 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1861 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1863 if fparent1 in fparentancestors:
1862 if fparent1 in fparentancestors:
1864 fparent1, fparent2 = fparent2, nullid
1863 fparent1, fparent2 = fparent2, nullid
1865 elif fparent2 in fparentancestors:
1864 elif fparent2 in fparentancestors:
1866 fparent2 = nullid
1865 fparent2 = nullid
1867
1866
1868 # is the file changed?
1867 # is the file changed?
1869 text = fctx.data()
1868 text = fctx.data()
1870 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1869 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1871 changelist.append(fname)
1870 changelist.append(fname)
1872 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1871 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1873 # are just the flags changed during merge?
1872 # are just the flags changed during merge?
1874 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1873 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1875 changelist.append(fname)
1874 changelist.append(fname)
1876
1875
1877 return fparent1
1876 return fparent1
1878
1877
1879 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1878 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1880 """check for commit arguments that aren't committable"""
1879 """check for commit arguments that aren't committable"""
1881 if match.isexact() or match.prefix():
1880 if match.isexact() or match.prefix():
1882 matched = set(status.modified + status.added + status.removed)
1881 matched = set(status.modified + status.added + status.removed)
1883
1882
1884 for f in match.files():
1883 for f in match.files():
1885 f = self.dirstate.normalize(f)
1884 f = self.dirstate.normalize(f)
1886 if f == '.' or f in matched or f in wctx.substate:
1885 if f == '.' or f in matched or f in wctx.substate:
1887 continue
1886 continue
1888 if f in status.deleted:
1887 if f in status.deleted:
1889 fail(f, _('file not found!'))
1888 fail(f, _('file not found!'))
1890 if f in vdirs: # visited directory
1889 if f in vdirs: # visited directory
1891 d = f + '/'
1890 d = f + '/'
1892 for mf in matched:
1891 for mf in matched:
1893 if mf.startswith(d):
1892 if mf.startswith(d):
1894 break
1893 break
1895 else:
1894 else:
1896 fail(f, _("no match under directory!"))
1895 fail(f, _("no match under directory!"))
1897 elif f not in self.dirstate:
1896 elif f not in self.dirstate:
1898 fail(f, _("file not tracked!"))
1897 fail(f, _("file not tracked!"))
1899
1898
1900 @unfilteredmethod
1899 @unfilteredmethod
1901 def commit(self, text="", user=None, date=None, match=None, force=False,
1900 def commit(self, text="", user=None, date=None, match=None, force=False,
1902 editor=False, extra=None):
1901 editor=False, extra=None):
1903 """Add a new revision to current repository.
1902 """Add a new revision to current repository.
1904
1903
1905 Revision information is gathered from the working directory,
1904 Revision information is gathered from the working directory,
1906 match can be used to filter the committed files. If editor is
1905 match can be used to filter the committed files. If editor is
1907 supplied, it is called to get a commit message.
1906 supplied, it is called to get a commit message.
1908 """
1907 """
1909 if extra is None:
1908 if extra is None:
1910 extra = {}
1909 extra = {}
1911
1910
1912 def fail(f, msg):
1911 def fail(f, msg):
1913 raise error.Abort('%s: %s' % (f, msg))
1912 raise error.Abort('%s: %s' % (f, msg))
1914
1913
1915 if not match:
1914 if not match:
1916 match = matchmod.always(self.root, '')
1915 match = matchmod.always(self.root, '')
1917
1916
1918 if not force:
1917 if not force:
1919 vdirs = []
1918 vdirs = []
1920 match.explicitdir = vdirs.append
1919 match.explicitdir = vdirs.append
1921 match.bad = fail
1920 match.bad = fail
1922
1921
1923 wlock = lock = tr = None
1922 wlock = lock = tr = None
1924 try:
1923 try:
1925 wlock = self.wlock()
1924 wlock = self.wlock()
1926 lock = self.lock() # for recent changelog (see issue4368)
1925 lock = self.lock() # for recent changelog (see issue4368)
1927
1926
1928 wctx = self[None]
1927 wctx = self[None]
1929 merge = len(wctx.parents()) > 1
1928 merge = len(wctx.parents()) > 1
1930
1929
1931 if not force and merge and not match.always():
1930 if not force and merge and not match.always():
1932 raise error.Abort(_('cannot partially commit a merge '
1931 raise error.Abort(_('cannot partially commit a merge '
1933 '(do not specify files or patterns)'))
1932 '(do not specify files or patterns)'))
1934
1933
1935 status = self.status(match=match, clean=force)
1934 status = self.status(match=match, clean=force)
1936 if force:
1935 if force:
1937 status.modified.extend(status.clean) # mq may commit clean files
1936 status.modified.extend(status.clean) # mq may commit clean files
1938
1937
1939 # check subrepos
1938 # check subrepos
1940 subs, commitsubs, newstate = subrepoutil.precommit(
1939 subs, commitsubs, newstate = subrepoutil.precommit(
1941 self.ui, wctx, status, match, force=force)
1940 self.ui, wctx, status, match, force=force)
1942
1941
1943 # make sure all explicit patterns are matched
1942 # make sure all explicit patterns are matched
1944 if not force:
1943 if not force:
1945 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1944 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1946
1945
1947 cctx = context.workingcommitctx(self, status,
1946 cctx = context.workingcommitctx(self, status,
1948 text, user, date, extra)
1947 text, user, date, extra)
1949
1948
1950 # internal config: ui.allowemptycommit
1949 # internal config: ui.allowemptycommit
1951 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1950 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1952 or extra.get('close') or merge or cctx.files()
1951 or extra.get('close') or merge or cctx.files()
1953 or self.ui.configbool('ui', 'allowemptycommit'))
1952 or self.ui.configbool('ui', 'allowemptycommit'))
1954 if not allowemptycommit:
1953 if not allowemptycommit:
1955 return None
1954 return None
1956
1955
1957 if merge and cctx.deleted():
1956 if merge and cctx.deleted():
1958 raise error.Abort(_("cannot commit merge with missing files"))
1957 raise error.Abort(_("cannot commit merge with missing files"))
1959
1958
1960 ms = mergemod.mergestate.read(self)
1959 ms = mergemod.mergestate.read(self)
1961 mergeutil.checkunresolved(ms)
1960 mergeutil.checkunresolved(ms)
1962
1961
1963 if editor:
1962 if editor:
1964 cctx._text = editor(self, cctx, subs)
1963 cctx._text = editor(self, cctx, subs)
1965 edited = (text != cctx._text)
1964 edited = (text != cctx._text)
1966
1965
1967 # Save commit message in case this transaction gets rolled back
1966 # Save commit message in case this transaction gets rolled back
1968 # (e.g. by a pretxncommit hook). Leave the content alone on
1967 # (e.g. by a pretxncommit hook). Leave the content alone on
1969 # the assumption that the user will use the same editor again.
1968 # the assumption that the user will use the same editor again.
1970 msgfn = self.savecommitmessage(cctx._text)
1969 msgfn = self.savecommitmessage(cctx._text)
1971
1970
1972 # commit subs and write new state
1971 # commit subs and write new state
1973 if subs:
1972 if subs:
1974 for s in sorted(commitsubs):
1973 for s in sorted(commitsubs):
1975 sub = wctx.sub(s)
1974 sub = wctx.sub(s)
1976 self.ui.status(_('committing subrepository %s\n') %
1975 self.ui.status(_('committing subrepository %s\n') %
1977 subrepoutil.subrelpath(sub))
1976 subrepoutil.subrelpath(sub))
1978 sr = sub.commit(cctx._text, user, date)
1977 sr = sub.commit(cctx._text, user, date)
1979 newstate[s] = (newstate[s][0], sr)
1978 newstate[s] = (newstate[s][0], sr)
1980 subrepoutil.writestate(self, newstate)
1979 subrepoutil.writestate(self, newstate)
1981
1980
1982 p1, p2 = self.dirstate.parents()
1981 p1, p2 = self.dirstate.parents()
1983 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1982 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1984 try:
1983 try:
1985 self.hook("precommit", throw=True, parent1=hookp1,
1984 self.hook("precommit", throw=True, parent1=hookp1,
1986 parent2=hookp2)
1985 parent2=hookp2)
1987 tr = self.transaction('commit')
1986 tr = self.transaction('commit')
1988 ret = self.commitctx(cctx, True)
1987 ret = self.commitctx(cctx, True)
1989 except: # re-raises
1988 except: # re-raises
1990 if edited:
1989 if edited:
1991 self.ui.write(
1990 self.ui.write(
1992 _('note: commit message saved in %s\n') % msgfn)
1991 _('note: commit message saved in %s\n') % msgfn)
1993 raise
1992 raise
1994 # update bookmarks, dirstate and mergestate
1993 # update bookmarks, dirstate and mergestate
1995 bookmarks.update(self, [p1, p2], ret)
1994 bookmarks.update(self, [p1, p2], ret)
1996 cctx.markcommitted(ret)
1995 cctx.markcommitted(ret)
1997 ms.reset()
1996 ms.reset()
1998 tr.close()
1997 tr.close()
1999
1998
2000 finally:
1999 finally:
2001 lockmod.release(tr, lock, wlock)
2000 lockmod.release(tr, lock, wlock)
2002
2001
2003 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2002 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2004 # hack for command that use a temporary commit (eg: histedit)
2003 # hack for command that use a temporary commit (eg: histedit)
2005 # temporary commit got stripped before hook release
2004 # temporary commit got stripped before hook release
2006 if self.changelog.hasnode(ret):
2005 if self.changelog.hasnode(ret):
2007 self.hook("commit", node=node, parent1=parent1,
2006 self.hook("commit", node=node, parent1=parent1,
2008 parent2=parent2)
2007 parent2=parent2)
2009 self._afterlock(commithook)
2008 self._afterlock(commithook)
2010 return ret
2009 return ret
2011
2010
2012 @unfilteredmethod
2011 @unfilteredmethod
2013 def commitctx(self, ctx, error=False):
2012 def commitctx(self, ctx, error=False):
2014 """Add a new revision to current repository.
2013 """Add a new revision to current repository.
2015 Revision information is passed via the context argument.
2014 Revision information is passed via the context argument.
2016 """
2015 """
2017
2016
2018 tr = None
2017 tr = None
2019 p1, p2 = ctx.p1(), ctx.p2()
2018 p1, p2 = ctx.p1(), ctx.p2()
2020 user = ctx.user()
2019 user = ctx.user()
2021
2020
2022 lock = self.lock()
2021 lock = self.lock()
2023 try:
2022 try:
2024 tr = self.transaction("commit")
2023 tr = self.transaction("commit")
2025 trp = weakref.proxy(tr)
2024 trp = weakref.proxy(tr)
2026
2025
2027 if ctx.manifestnode():
2026 if ctx.manifestnode():
2028 # reuse an existing manifest revision
2027 # reuse an existing manifest revision
2029 mn = ctx.manifestnode()
2028 mn = ctx.manifestnode()
2030 files = ctx.files()
2029 files = ctx.files()
2031 elif ctx.files():
2030 elif ctx.files():
2032 m1ctx = p1.manifestctx()
2031 m1ctx = p1.manifestctx()
2033 m2ctx = p2.manifestctx()
2032 m2ctx = p2.manifestctx()
2034 mctx = m1ctx.copy()
2033 mctx = m1ctx.copy()
2035
2034
2036 m = mctx.read()
2035 m = mctx.read()
2037 m1 = m1ctx.read()
2036 m1 = m1ctx.read()
2038 m2 = m2ctx.read()
2037 m2 = m2ctx.read()
2039
2038
2040 # check in files
2039 # check in files
2041 added = []
2040 added = []
2042 changed = []
2041 changed = []
2043 removed = list(ctx.removed())
2042 removed = list(ctx.removed())
2044 linkrev = len(self)
2043 linkrev = len(self)
2045 self.ui.note(_("committing files:\n"))
2044 self.ui.note(_("committing files:\n"))
2046 for f in sorted(ctx.modified() + ctx.added()):
2045 for f in sorted(ctx.modified() + ctx.added()):
2047 self.ui.note(f + "\n")
2046 self.ui.note(f + "\n")
2048 try:
2047 try:
2049 fctx = ctx[f]
2048 fctx = ctx[f]
2050 if fctx is None:
2049 if fctx is None:
2051 removed.append(f)
2050 removed.append(f)
2052 else:
2051 else:
2053 added.append(f)
2052 added.append(f)
2054 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2053 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2055 trp, changed)
2054 trp, changed)
2056 m.setflag(f, fctx.flags())
2055 m.setflag(f, fctx.flags())
2057 except OSError as inst:
2056 except OSError as inst:
2058 self.ui.warn(_("trouble committing %s!\n") % f)
2057 self.ui.warn(_("trouble committing %s!\n") % f)
2059 raise
2058 raise
2060 except IOError as inst:
2059 except IOError as inst:
2061 errcode = getattr(inst, 'errno', errno.ENOENT)
2060 errcode = getattr(inst, 'errno', errno.ENOENT)
2062 if error or errcode and errcode != errno.ENOENT:
2061 if error or errcode and errcode != errno.ENOENT:
2063 self.ui.warn(_("trouble committing %s!\n") % f)
2062 self.ui.warn(_("trouble committing %s!\n") % f)
2064 raise
2063 raise
2065
2064
2066 # update manifest
2065 # update manifest
2067 self.ui.note(_("committing manifest\n"))
2066 self.ui.note(_("committing manifest\n"))
2068 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2067 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2069 drop = [f for f in removed if f in m]
2068 drop = [f for f in removed if f in m]
2070 for f in drop:
2069 for f in drop:
2071 del m[f]
2070 del m[f]
2072 mn = mctx.write(trp, linkrev,
2071 mn = mctx.write(trp, linkrev,
2073 p1.manifestnode(), p2.manifestnode(),
2072 p1.manifestnode(), p2.manifestnode(),
2074 added, drop)
2073 added, drop)
2075 files = changed + removed
2074 files = changed + removed
2076 else:
2075 else:
2077 mn = p1.manifestnode()
2076 mn = p1.manifestnode()
2078 files = []
2077 files = []
2079
2078
2080 # update changelog
2079 # update changelog
2081 self.ui.note(_("committing changelog\n"))
2080 self.ui.note(_("committing changelog\n"))
2082 self.changelog.delayupdate(tr)
2081 self.changelog.delayupdate(tr)
2083 n = self.changelog.add(mn, files, ctx.description(),
2082 n = self.changelog.add(mn, files, ctx.description(),
2084 trp, p1.node(), p2.node(),
2083 trp, p1.node(), p2.node(),
2085 user, ctx.date(), ctx.extra().copy())
2084 user, ctx.date(), ctx.extra().copy())
2086 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2085 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2087 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2086 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2088 parent2=xp2)
2087 parent2=xp2)
2089 # set the new commit is proper phase
2088 # set the new commit is proper phase
2090 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2089 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2091 if targetphase:
2090 if targetphase:
2092 # retract boundary do not alter parent changeset.
2091 # retract boundary do not alter parent changeset.
2093 # if a parent have higher the resulting phase will
2092 # if a parent have higher the resulting phase will
2094 # be compliant anyway
2093 # be compliant anyway
2095 #
2094 #
2096 # if minimal phase was 0 we don't need to retract anything
2095 # if minimal phase was 0 we don't need to retract anything
2097 phases.registernew(self, tr, targetphase, [n])
2096 phases.registernew(self, tr, targetphase, [n])
2098 tr.close()
2097 tr.close()
2099 return n
2098 return n
2100 finally:
2099 finally:
2101 if tr:
2100 if tr:
2102 tr.release()
2101 tr.release()
2103 lock.release()
2102 lock.release()
2104
2103
2105 @unfilteredmethod
2104 @unfilteredmethod
2106 def destroying(self):
2105 def destroying(self):
2107 '''Inform the repository that nodes are about to be destroyed.
2106 '''Inform the repository that nodes are about to be destroyed.
2108 Intended for use by strip and rollback, so there's a common
2107 Intended for use by strip and rollback, so there's a common
2109 place for anything that has to be done before destroying history.
2108 place for anything that has to be done before destroying history.
2110
2109
2111 This is mostly useful for saving state that is in memory and waiting
2110 This is mostly useful for saving state that is in memory and waiting
2112 to be flushed when the current lock is released. Because a call to
2111 to be flushed when the current lock is released. Because a call to
2113 destroyed is imminent, the repo will be invalidated causing those
2112 destroyed is imminent, the repo will be invalidated causing those
2114 changes to stay in memory (waiting for the next unlock), or vanish
2113 changes to stay in memory (waiting for the next unlock), or vanish
2115 completely.
2114 completely.
2116 '''
2115 '''
2117 # When using the same lock to commit and strip, the phasecache is left
2116 # When using the same lock to commit and strip, the phasecache is left
2118 # dirty after committing. Then when we strip, the repo is invalidated,
2117 # dirty after committing. Then when we strip, the repo is invalidated,
2119 # causing those changes to disappear.
2118 # causing those changes to disappear.
2120 if '_phasecache' in vars(self):
2119 if '_phasecache' in vars(self):
2121 self._phasecache.write()
2120 self._phasecache.write()
2122
2121
2123 @unfilteredmethod
2122 @unfilteredmethod
2124 def destroyed(self):
2123 def destroyed(self):
2125 '''Inform the repository that nodes have been destroyed.
2124 '''Inform the repository that nodes have been destroyed.
2126 Intended for use by strip and rollback, so there's a common
2125 Intended for use by strip and rollback, so there's a common
2127 place for anything that has to be done after destroying history.
2126 place for anything that has to be done after destroying history.
2128 '''
2127 '''
2129 # When one tries to:
2128 # When one tries to:
2130 # 1) destroy nodes thus calling this method (e.g. strip)
2129 # 1) destroy nodes thus calling this method (e.g. strip)
2131 # 2) use phasecache somewhere (e.g. commit)
2130 # 2) use phasecache somewhere (e.g. commit)
2132 #
2131 #
2133 # then 2) will fail because the phasecache contains nodes that were
2132 # then 2) will fail because the phasecache contains nodes that were
2134 # removed. We can either remove phasecache from the filecache,
2133 # removed. We can either remove phasecache from the filecache,
2135 # causing it to reload next time it is accessed, or simply filter
2134 # causing it to reload next time it is accessed, or simply filter
2136 # the removed nodes now and write the updated cache.
2135 # the removed nodes now and write the updated cache.
2137 self._phasecache.filterunknown(self)
2136 self._phasecache.filterunknown(self)
2138 self._phasecache.write()
2137 self._phasecache.write()
2139
2138
2140 # refresh all repository caches
2139 # refresh all repository caches
2141 self.updatecaches()
2140 self.updatecaches()
2142
2141
2143 # Ensure the persistent tag cache is updated. Doing it now
2142 # Ensure the persistent tag cache is updated. Doing it now
2144 # means that the tag cache only has to worry about destroyed
2143 # means that the tag cache only has to worry about destroyed
2145 # heads immediately after a strip/rollback. That in turn
2144 # heads immediately after a strip/rollback. That in turn
2146 # guarantees that "cachetip == currenttip" (comparing both rev
2145 # guarantees that "cachetip == currenttip" (comparing both rev
2147 # and node) always means no nodes have been added or destroyed.
2146 # and node) always means no nodes have been added or destroyed.
2148
2147
2149 # XXX this is suboptimal when qrefresh'ing: we strip the current
2148 # XXX this is suboptimal when qrefresh'ing: we strip the current
2150 # head, refresh the tag cache, then immediately add a new head.
2149 # head, refresh the tag cache, then immediately add a new head.
2151 # But I think doing it this way is necessary for the "instant
2150 # But I think doing it this way is necessary for the "instant
2152 # tag cache retrieval" case to work.
2151 # tag cache retrieval" case to work.
2153 self.invalidate()
2152 self.invalidate()
2154
2153
2155 def status(self, node1='.', node2=None, match=None,
2154 def status(self, node1='.', node2=None, match=None,
2156 ignored=False, clean=False, unknown=False,
2155 ignored=False, clean=False, unknown=False,
2157 listsubrepos=False):
2156 listsubrepos=False):
2158 '''a convenience method that calls node1.status(node2)'''
2157 '''a convenience method that calls node1.status(node2)'''
2159 return self[node1].status(node2, match, ignored, clean, unknown,
2158 return self[node1].status(node2, match, ignored, clean, unknown,
2160 listsubrepos)
2159 listsubrepos)
2161
2160
2162 def addpostdsstatus(self, ps):
2161 def addpostdsstatus(self, ps):
2163 """Add a callback to run within the wlock, at the point at which status
2162 """Add a callback to run within the wlock, at the point at which status
2164 fixups happen.
2163 fixups happen.
2165
2164
2166 On status completion, callback(wctx, status) will be called with the
2165 On status completion, callback(wctx, status) will be called with the
2167 wlock held, unless the dirstate has changed from underneath or the wlock
2166 wlock held, unless the dirstate has changed from underneath or the wlock
2168 couldn't be grabbed.
2167 couldn't be grabbed.
2169
2168
2170 Callbacks should not capture and use a cached copy of the dirstate --
2169 Callbacks should not capture and use a cached copy of the dirstate --
2171 it might change in the meanwhile. Instead, they should access the
2170 it might change in the meanwhile. Instead, they should access the
2172 dirstate via wctx.repo().dirstate.
2171 dirstate via wctx.repo().dirstate.
2173
2172
2174 This list is emptied out after each status run -- extensions should
2173 This list is emptied out after each status run -- extensions should
2175 make sure it adds to this list each time dirstate.status is called.
2174 make sure it adds to this list each time dirstate.status is called.
2176 Extensions should also make sure they don't call this for statuses
2175 Extensions should also make sure they don't call this for statuses
2177 that don't involve the dirstate.
2176 that don't involve the dirstate.
2178 """
2177 """
2179
2178
2180 # The list is located here for uniqueness reasons -- it is actually
2179 # The list is located here for uniqueness reasons -- it is actually
2181 # managed by the workingctx, but that isn't unique per-repo.
2180 # managed by the workingctx, but that isn't unique per-repo.
2182 self._postdsstatus.append(ps)
2181 self._postdsstatus.append(ps)
2183
2182
2184 def postdsstatus(self):
2183 def postdsstatus(self):
2185 """Used by workingctx to get the list of post-dirstate-status hooks."""
2184 """Used by workingctx to get the list of post-dirstate-status hooks."""
2186 return self._postdsstatus
2185 return self._postdsstatus
2187
2186
2188 def clearpostdsstatus(self):
2187 def clearpostdsstatus(self):
2189 """Used by workingctx to clear post-dirstate-status hooks."""
2188 """Used by workingctx to clear post-dirstate-status hooks."""
2190 del self._postdsstatus[:]
2189 del self._postdsstatus[:]
2191
2190
2192 def heads(self, start=None):
2191 def heads(self, start=None):
2193 if start is None:
2192 if start is None:
2194 cl = self.changelog
2193 cl = self.changelog
2195 headrevs = reversed(cl.headrevs())
2194 headrevs = reversed(cl.headrevs())
2196 return [cl.node(rev) for rev in headrevs]
2195 return [cl.node(rev) for rev in headrevs]
2197
2196
2198 heads = self.changelog.heads(start)
2197 heads = self.changelog.heads(start)
2199 # sort the output in rev descending order
2198 # sort the output in rev descending order
2200 return sorted(heads, key=self.changelog.rev, reverse=True)
2199 return sorted(heads, key=self.changelog.rev, reverse=True)
2201
2200
2202 def branchheads(self, branch=None, start=None, closed=False):
2201 def branchheads(self, branch=None, start=None, closed=False):
2203 '''return a (possibly filtered) list of heads for the given branch
2202 '''return a (possibly filtered) list of heads for the given branch
2204
2203
2205 Heads are returned in topological order, from newest to oldest.
2204 Heads are returned in topological order, from newest to oldest.
2206 If branch is None, use the dirstate branch.
2205 If branch is None, use the dirstate branch.
2207 If start is not None, return only heads reachable from start.
2206 If start is not None, return only heads reachable from start.
2208 If closed is True, return heads that are marked as closed as well.
2207 If closed is True, return heads that are marked as closed as well.
2209 '''
2208 '''
2210 if branch is None:
2209 if branch is None:
2211 branch = self[None].branch()
2210 branch = self[None].branch()
2212 branches = self.branchmap()
2211 branches = self.branchmap()
2213 if branch not in branches:
2212 if branch not in branches:
2214 return []
2213 return []
2215 # the cache returns heads ordered lowest to highest
2214 # the cache returns heads ordered lowest to highest
2216 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2215 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2217 if start is not None:
2216 if start is not None:
2218 # filter out the heads that cannot be reached from startrev
2217 # filter out the heads that cannot be reached from startrev
2219 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2218 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2220 bheads = [h for h in bheads if h in fbheads]
2219 bheads = [h for h in bheads if h in fbheads]
2221 return bheads
2220 return bheads
2222
2221
2223 def branches(self, nodes):
2222 def branches(self, nodes):
2224 if not nodes:
2223 if not nodes:
2225 nodes = [self.changelog.tip()]
2224 nodes = [self.changelog.tip()]
2226 b = []
2225 b = []
2227 for n in nodes:
2226 for n in nodes:
2228 t = n
2227 t = n
2229 while True:
2228 while True:
2230 p = self.changelog.parents(n)
2229 p = self.changelog.parents(n)
2231 if p[1] != nullid or p[0] == nullid:
2230 if p[1] != nullid or p[0] == nullid:
2232 b.append((t, n, p[0], p[1]))
2231 b.append((t, n, p[0], p[1]))
2233 break
2232 break
2234 n = p[0]
2233 n = p[0]
2235 return b
2234 return b
2236
2235
2237 def between(self, pairs):
2236 def between(self, pairs):
2238 r = []
2237 r = []
2239
2238
2240 for top, bottom in pairs:
2239 for top, bottom in pairs:
2241 n, l, i = top, [], 0
2240 n, l, i = top, [], 0
2242 f = 1
2241 f = 1
2243
2242
2244 while n != bottom and n != nullid:
2243 while n != bottom and n != nullid:
2245 p = self.changelog.parents(n)[0]
2244 p = self.changelog.parents(n)[0]
2246 if i == f:
2245 if i == f:
2247 l.append(n)
2246 l.append(n)
2248 f = f * 2
2247 f = f * 2
2249 n = p
2248 n = p
2250 i += 1
2249 i += 1
2251
2250
2252 r.append(l)
2251 r.append(l)
2253
2252
2254 return r
2253 return r
2255
2254
2256 def checkpush(self, pushop):
2255 def checkpush(self, pushop):
2257 """Extensions can override this function if additional checks have
2256 """Extensions can override this function if additional checks have
2258 to be performed before pushing, or call it if they override push
2257 to be performed before pushing, or call it if they override push
2259 command.
2258 command.
2260 """
2259 """
2261
2260
2262 @unfilteredpropertycache
2261 @unfilteredpropertycache
2263 def prepushoutgoinghooks(self):
2262 def prepushoutgoinghooks(self):
2264 """Return util.hooks consists of a pushop with repo, remote, outgoing
2263 """Return util.hooks consists of a pushop with repo, remote, outgoing
2265 methods, which are called before pushing changesets.
2264 methods, which are called before pushing changesets.
2266 """
2265 """
2267 return util.hooks()
2266 return util.hooks()
2268
2267
2269 def pushkey(self, namespace, key, old, new):
2268 def pushkey(self, namespace, key, old, new):
2270 try:
2269 try:
2271 tr = self.currenttransaction()
2270 tr = self.currenttransaction()
2272 hookargs = {}
2271 hookargs = {}
2273 if tr is not None:
2272 if tr is not None:
2274 hookargs.update(tr.hookargs)
2273 hookargs.update(tr.hookargs)
2275 hookargs = pycompat.strkwargs(hookargs)
2274 hookargs = pycompat.strkwargs(hookargs)
2276 hookargs[r'namespace'] = namespace
2275 hookargs[r'namespace'] = namespace
2277 hookargs[r'key'] = key
2276 hookargs[r'key'] = key
2278 hookargs[r'old'] = old
2277 hookargs[r'old'] = old
2279 hookargs[r'new'] = new
2278 hookargs[r'new'] = new
2280 self.hook('prepushkey', throw=True, **hookargs)
2279 self.hook('prepushkey', throw=True, **hookargs)
2281 except error.HookAbort as exc:
2280 except error.HookAbort as exc:
2282 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2281 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2283 if exc.hint:
2282 if exc.hint:
2284 self.ui.write_err(_("(%s)\n") % exc.hint)
2283 self.ui.write_err(_("(%s)\n") % exc.hint)
2285 return False
2284 return False
2286 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2285 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2287 ret = pushkey.push(self, namespace, key, old, new)
2286 ret = pushkey.push(self, namespace, key, old, new)
2288 def runhook():
2287 def runhook():
2289 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2288 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2290 ret=ret)
2289 ret=ret)
2291 self._afterlock(runhook)
2290 self._afterlock(runhook)
2292 return ret
2291 return ret
2293
2292
2294 def listkeys(self, namespace):
2293 def listkeys(self, namespace):
2295 self.hook('prelistkeys', throw=True, namespace=namespace)
2294 self.hook('prelistkeys', throw=True, namespace=namespace)
2296 self.ui.debug('listing keys for "%s"\n' % namespace)
2295 self.ui.debug('listing keys for "%s"\n' % namespace)
2297 values = pushkey.list(self, namespace)
2296 values = pushkey.list(self, namespace)
2298 self.hook('listkeys', namespace=namespace, values=values)
2297 self.hook('listkeys', namespace=namespace, values=values)
2299 return values
2298 return values
2300
2299
2301 def debugwireargs(self, one, two, three=None, four=None, five=None):
2300 def debugwireargs(self, one, two, three=None, four=None, five=None):
2302 '''used to test argument passing over the wire'''
2301 '''used to test argument passing over the wire'''
2303 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2302 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2304 pycompat.bytestr(four),
2303 pycompat.bytestr(four),
2305 pycompat.bytestr(five))
2304 pycompat.bytestr(five))
2306
2305
2307 def savecommitmessage(self, text):
2306 def savecommitmessage(self, text):
2308 fp = self.vfs('last-message.txt', 'wb')
2307 fp = self.vfs('last-message.txt', 'wb')
2309 try:
2308 try:
2310 fp.write(text)
2309 fp.write(text)
2311 finally:
2310 finally:
2312 fp.close()
2311 fp.close()
2313 return self.pathto(fp.name[len(self.root) + 1:])
2312 return self.pathto(fp.name[len(self.root) + 1:])
2314
2313
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) triple in
    *files*, for running after a transaction completes.

    Missing source files (journal not yet created) are silently skipped.
    """
    renamefiles = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
2329
2328
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2334
2333
def instance(ui, path, create, intents=None):
    """Instantiate a local repository object for *path*."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create, intents=intents)
2338
2337
def islocal(path):
    """Local repositories are always local, whatever *path* is."""
    return True
2341
2340
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    reqs = {'revlogv1'}
    # format.* booleans that each map directly onto one requirement
    for configname, requirement in [('usestore', 'store'),
                                    ('usefncache', 'fncache'),
                                    ('dotencode', 'dotencode')]:
        if ui.configbool('format', configname):
            reqs.add(requirement)

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        reqs.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        reqs.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        reqs.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        reqs.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        reqs.discard('generaldelta')
        reqs.add(REVLOGV2_REQUIREMENT)

    return reqs
General Comments 0
You need to be logged in to leave comments. Login now