context: move reuse of context object to repo.__getitem__ (API)
Martin von Zweigbergk
r37191:bb47dc2f (branch: default)
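Note: the diff below removes basectx.__new__, which handed back any existing
basectx instance passed as changeid, along with the matching early-return in
changectx.__init__. Per the commit title, that reuse check now lives in
repo.__getitem__ instead. The localrepo.py hunk is not part of this excerpt,
so the following is only a minimal sketch of the idea, assuming the rough
shape of localrepo.__getitem__; the actual committed code may differ:

    # Hypothetical sketch only -- not the verbatim localrepo.py change.
    def __getitem__(self, changeid):
        # If the caller already holds a context object, return it as-is
        # instead of routing it through basectx.__new__ (removed below).
        if isinstance(changeid, context.basectx):
            return changeid
        # ...existing changeid lookup logic...
        return context.changectx(self, changeid)

With the reuse check done once at the repo[changeid] entry point, basectx no
longer needs a custom __new__ and changectx.__init__ no longer needs its
isinstance early-return, which is what the two removed hunks below show.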
@@ -1,2611 +1,2598 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirnodes,
26 wdirnodes,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from . import (
29 from . import (
30 dagop,
30 dagop,
31 encoding,
31 encoding,
32 error,
32 error,
33 fileset,
33 fileset,
34 match as matchmod,
34 match as matchmod,
35 obsolete as obsmod,
35 obsolete as obsmod,
36 obsutil,
36 obsutil,
37 patch,
37 patch,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 pycompat,
40 pycompat,
41 repoview,
41 repoview,
42 revlog,
42 revlog,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56 nonascii = re.compile(br'[^\x21-\x7f]').search
56 nonascii = re.compile(br'[^\x21-\x7f]').search
57
57
58 class basectx(object):
58 class basectx(object):
59 """A basectx object represents the common logic for its children:
59 """A basectx object represents the common logic for its children:
60 changectx: read-only context that is already present in the repo,
60 changectx: read-only context that is already present in the repo,
61 workingctx: a context that represents the working directory and can
61 workingctx: a context that represents the working directory and can
62 be committed,
62 be committed,
63 memctx: a context that represents changes in-memory and can also
63 memctx: a context that represents changes in-memory and can also
64 be committed."""
64 be committed."""
65 def __new__(cls, repo, changeid='', *args, **kwargs):
66 if isinstance(changeid, basectx):
67 return changeid
68
69 return super(basectx, cls).__new__(cls)
70
65
71 def __bytes__(self):
66 def __bytes__(self):
72 return short(self.node())
67 return short(self.node())
73
68
74 __str__ = encoding.strmethod(__bytes__)
69 __str__ = encoding.strmethod(__bytes__)
75
70
76 def __repr__(self):
71 def __repr__(self):
77 return r"<%s %s>" % (type(self).__name__, str(self))
72 return r"<%s %s>" % (type(self).__name__, str(self))
78
73
79 def __eq__(self, other):
74 def __eq__(self, other):
80 try:
75 try:
81 return type(self) == type(other) and self._rev == other._rev
76 return type(self) == type(other) and self._rev == other._rev
82 except AttributeError:
77 except AttributeError:
83 return False
78 return False
84
79
85 def __ne__(self, other):
80 def __ne__(self, other):
86 return not (self == other)
81 return not (self == other)
87
82
88 def __contains__(self, key):
83 def __contains__(self, key):
89 return key in self._manifest
84 return key in self._manifest
90
85
91 def __getitem__(self, key):
86 def __getitem__(self, key):
92 return self.filectx(key)
87 return self.filectx(key)
93
88
94 def __iter__(self):
89 def __iter__(self):
95 return iter(self._manifest)
90 return iter(self._manifest)
96
91
97 def _buildstatusmanifest(self, status):
92 def _buildstatusmanifest(self, status):
98 """Builds a manifest that includes the given status results, if this is
93 """Builds a manifest that includes the given status results, if this is
99 a working copy context. For non-working copy contexts, it just returns
94 a working copy context. For non-working copy contexts, it just returns
100 the normal manifest."""
95 the normal manifest."""
101 return self.manifest()
96 return self.manifest()
102
97
103 def _matchstatus(self, other, match):
98 def _matchstatus(self, other, match):
104 """This internal method provides a way for child objects to override the
99 """This internal method provides a way for child objects to override the
105 match operator.
100 match operator.
106 """
101 """
107 return match
102 return match
108
103
109 def _buildstatus(self, other, s, match, listignored, listclean,
104 def _buildstatus(self, other, s, match, listignored, listclean,
110 listunknown):
105 listunknown):
111 """build a status with respect to another context"""
106 """build a status with respect to another context"""
112 # Load earliest manifest first for caching reasons. More specifically,
107 # Load earliest manifest first for caching reasons. More specifically,
113 # if you have revisions 1000 and 1001, 1001 is probably stored as a
108 # if you have revisions 1000 and 1001, 1001 is probably stored as a
114 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
109 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
115 # 1000 and cache it so that when you read 1001, we just need to apply a
110 # 1000 and cache it so that when you read 1001, we just need to apply a
116 # delta to what's in the cache. So that's one full reconstruction + one
111 # delta to what's in the cache. So that's one full reconstruction + one
117 # delta application.
112 # delta application.
118 mf2 = None
113 mf2 = None
119 if self.rev() is not None and self.rev() < other.rev():
114 if self.rev() is not None and self.rev() < other.rev():
120 mf2 = self._buildstatusmanifest(s)
115 mf2 = self._buildstatusmanifest(s)
121 mf1 = other._buildstatusmanifest(s)
116 mf1 = other._buildstatusmanifest(s)
122 if mf2 is None:
117 if mf2 is None:
123 mf2 = self._buildstatusmanifest(s)
118 mf2 = self._buildstatusmanifest(s)
124
119
125 modified, added = [], []
120 modified, added = [], []
126 removed = []
121 removed = []
127 clean = []
122 clean = []
128 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
123 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
129 deletedset = set(deleted)
124 deletedset = set(deleted)
130 d = mf1.diff(mf2, match=match, clean=listclean)
125 d = mf1.diff(mf2, match=match, clean=listclean)
131 for fn, value in d.iteritems():
126 for fn, value in d.iteritems():
132 if fn in deletedset:
127 if fn in deletedset:
133 continue
128 continue
134 if value is None:
129 if value is None:
135 clean.append(fn)
130 clean.append(fn)
136 continue
131 continue
137 (node1, flag1), (node2, flag2) = value
132 (node1, flag1), (node2, flag2) = value
138 if node1 is None:
133 if node1 is None:
139 added.append(fn)
134 added.append(fn)
140 elif node2 is None:
135 elif node2 is None:
141 removed.append(fn)
136 removed.append(fn)
142 elif flag1 != flag2:
137 elif flag1 != flag2:
143 modified.append(fn)
138 modified.append(fn)
144 elif node2 not in wdirnodes:
139 elif node2 not in wdirnodes:
145 # When comparing files between two commits, we save time by
140 # When comparing files between two commits, we save time by
146 # not comparing the file contents when the nodeids differ.
141 # not comparing the file contents when the nodeids differ.
147 # Note that this means we incorrectly report a reverted change
142 # Note that this means we incorrectly report a reverted change
148 # to a file as a modification.
143 # to a file as a modification.
149 modified.append(fn)
144 modified.append(fn)
150 elif self[fn].cmp(other[fn]):
145 elif self[fn].cmp(other[fn]):
151 modified.append(fn)
146 modified.append(fn)
152 else:
147 else:
153 clean.append(fn)
148 clean.append(fn)
154
149
155 if removed:
150 if removed:
156 # need to filter files if they are already reported as removed
151 # need to filter files if they are already reported as removed
157 unknown = [fn for fn in unknown if fn not in mf1 and
152 unknown = [fn for fn in unknown if fn not in mf1 and
158 (not match or match(fn))]
153 (not match or match(fn))]
159 ignored = [fn for fn in ignored if fn not in mf1 and
154 ignored = [fn for fn in ignored if fn not in mf1 and
160 (not match or match(fn))]
155 (not match or match(fn))]
161 # if they're deleted, don't report them as removed
156 # if they're deleted, don't report them as removed
162 removed = [fn for fn in removed if fn not in deletedset]
157 removed = [fn for fn in removed if fn not in deletedset]
163
158
164 return scmutil.status(modified, added, removed, deleted, unknown,
159 return scmutil.status(modified, added, removed, deleted, unknown,
165 ignored, clean)
160 ignored, clean)
166
161
167 @propertycache
162 @propertycache
168 def substate(self):
163 def substate(self):
169 return subrepoutil.state(self, self._repo.ui)
164 return subrepoutil.state(self, self._repo.ui)
170
165
171 def subrev(self, subpath):
166 def subrev(self, subpath):
172 return self.substate[subpath][1]
167 return self.substate[subpath][1]
173
168
174 def rev(self):
169 def rev(self):
175 return self._rev
170 return self._rev
176 def node(self):
171 def node(self):
177 return self._node
172 return self._node
178 def hex(self):
173 def hex(self):
179 return hex(self.node())
174 return hex(self.node())
180 def manifest(self):
175 def manifest(self):
181 return self._manifest
176 return self._manifest
182 def manifestctx(self):
177 def manifestctx(self):
183 return self._manifestctx
178 return self._manifestctx
184 def repo(self):
179 def repo(self):
185 return self._repo
180 return self._repo
186 def phasestr(self):
181 def phasestr(self):
187 return phases.phasenames[self.phase()]
182 return phases.phasenames[self.phase()]
188 def mutable(self):
183 def mutable(self):
189 return self.phase() > phases.public
184 return self.phase() > phases.public
190
185
191 def getfileset(self, expr):
186 def getfileset(self, expr):
192 return fileset.getfileset(self, expr)
187 return fileset.getfileset(self, expr)
193
188
194 def obsolete(self):
189 def obsolete(self):
195 """True if the changeset is obsolete"""
190 """True if the changeset is obsolete"""
196 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
191 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
197
192
198 def extinct(self):
193 def extinct(self):
199 """True if the changeset is extinct"""
194 """True if the changeset is extinct"""
200 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
195 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
201
196
202 def orphan(self):
197 def orphan(self):
203 """True if the changeset is not obsolete but it's ancestor are"""
198 """True if the changeset is not obsolete but it's ancestor are"""
204 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
199 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
205
200
206 def phasedivergent(self):
201 def phasedivergent(self):
207 """True if the changeset try to be a successor of a public changeset
202 """True if the changeset try to be a successor of a public changeset
208
203
209 Only non-public and non-obsolete changesets may be bumped.
204 Only non-public and non-obsolete changesets may be bumped.
210 """
205 """
211 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
206 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
212
207
213 def contentdivergent(self):
208 def contentdivergent(self):
214 """Is a successors of a changeset with multiple possible successors set
209 """Is a successors of a changeset with multiple possible successors set
215
210
216 Only non-public and non-obsolete changesets may be divergent.
211 Only non-public and non-obsolete changesets may be divergent.
217 """
212 """
218 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
213 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
219
214
220 def isunstable(self):
215 def isunstable(self):
221 """True if the changeset is either unstable, bumped or divergent"""
216 """True if the changeset is either unstable, bumped or divergent"""
222 return self.orphan() or self.phasedivergent() or self.contentdivergent()
217 return self.orphan() or self.phasedivergent() or self.contentdivergent()
223
218
224 def instabilities(self):
219 def instabilities(self):
225 """return the list of instabilities affecting this changeset.
220 """return the list of instabilities affecting this changeset.
226
221
227 Instabilities are returned as strings. possible values are:
222 Instabilities are returned as strings. possible values are:
228 - orphan,
223 - orphan,
229 - phase-divergent,
224 - phase-divergent,
230 - content-divergent.
225 - content-divergent.
231 """
226 """
232 instabilities = []
227 instabilities = []
233 if self.orphan():
228 if self.orphan():
234 instabilities.append('orphan')
229 instabilities.append('orphan')
235 if self.phasedivergent():
230 if self.phasedivergent():
236 instabilities.append('phase-divergent')
231 instabilities.append('phase-divergent')
237 if self.contentdivergent():
232 if self.contentdivergent():
238 instabilities.append('content-divergent')
233 instabilities.append('content-divergent')
239 return instabilities
234 return instabilities
240
235
241 def parents(self):
236 def parents(self):
242 """return contexts for each parent changeset"""
237 """return contexts for each parent changeset"""
243 return self._parents
238 return self._parents
244
239
245 def p1(self):
240 def p1(self):
246 return self._parents[0]
241 return self._parents[0]
247
242
248 def p2(self):
243 def p2(self):
249 parents = self._parents
244 parents = self._parents
250 if len(parents) == 2:
245 if len(parents) == 2:
251 return parents[1]
246 return parents[1]
252 return changectx(self._repo, nullrev)
247 return changectx(self._repo, nullrev)
253
248
254 def _fileinfo(self, path):
249 def _fileinfo(self, path):
255 if r'_manifest' in self.__dict__:
250 if r'_manifest' in self.__dict__:
256 try:
251 try:
257 return self._manifest[path], self._manifest.flags(path)
252 return self._manifest[path], self._manifest.flags(path)
258 except KeyError:
253 except KeyError:
259 raise error.ManifestLookupError(self._node, path,
254 raise error.ManifestLookupError(self._node, path,
260 _('not found in manifest'))
255 _('not found in manifest'))
261 if r'_manifestdelta' in self.__dict__ or path in self.files():
256 if r'_manifestdelta' in self.__dict__ or path in self.files():
262 if path in self._manifestdelta:
257 if path in self._manifestdelta:
263 return (self._manifestdelta[path],
258 return (self._manifestdelta[path],
264 self._manifestdelta.flags(path))
259 self._manifestdelta.flags(path))
265 mfl = self._repo.manifestlog
260 mfl = self._repo.manifestlog
266 try:
261 try:
267 node, flag = mfl[self._changeset.manifest].find(path)
262 node, flag = mfl[self._changeset.manifest].find(path)
268 except KeyError:
263 except KeyError:
269 raise error.ManifestLookupError(self._node, path,
264 raise error.ManifestLookupError(self._node, path,
270 _('not found in manifest'))
265 _('not found in manifest'))
271
266
272 return node, flag
267 return node, flag
273
268
274 def filenode(self, path):
269 def filenode(self, path):
275 return self._fileinfo(path)[0]
270 return self._fileinfo(path)[0]
276
271
277 def flags(self, path):
272 def flags(self, path):
278 try:
273 try:
279 return self._fileinfo(path)[1]
274 return self._fileinfo(path)[1]
280 except error.LookupError:
275 except error.LookupError:
281 return ''
276 return ''
282
277
283 def sub(self, path, allowcreate=True):
278 def sub(self, path, allowcreate=True):
284 '''return a subrepo for the stored revision of path, never wdir()'''
279 '''return a subrepo for the stored revision of path, never wdir()'''
285 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
286
281
287 def nullsub(self, path, pctx):
282 def nullsub(self, path, pctx):
288 return subrepo.nullsubrepo(self, path, pctx)
283 return subrepo.nullsubrepo(self, path, pctx)
289
284
290 def workingsub(self, path):
285 def workingsub(self, path):
291 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
292 context.
287 context.
293 '''
288 '''
294 return subrepo.subrepo(self, path, allowwdir=True)
289 return subrepo.subrepo(self, path, allowwdir=True)
295
290
296 def match(self, pats=None, include=None, exclude=None, default='glob',
291 def match(self, pats=None, include=None, exclude=None, default='glob',
297 listsubrepos=False, badfn=None):
292 listsubrepos=False, badfn=None):
298 r = self._repo
293 r = self._repo
299 return matchmod.match(r.root, r.getcwd(), pats,
294 return matchmod.match(r.root, r.getcwd(), pats,
300 include, exclude, default,
295 include, exclude, default,
301 auditor=r.nofsauditor, ctx=self,
296 auditor=r.nofsauditor, ctx=self,
302 listsubrepos=listsubrepos, badfn=badfn)
297 listsubrepos=listsubrepos, badfn=badfn)
303
298
304 def diff(self, ctx2=None, match=None, **opts):
299 def diff(self, ctx2=None, match=None, **opts):
305 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
306 if ctx2 is None:
301 if ctx2 is None:
307 ctx2 = self.p1()
302 ctx2 = self.p1()
308 if ctx2 is not None:
303 if ctx2 is not None:
309 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
310 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
305 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
311 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
312
307
313 def dirs(self):
308 def dirs(self):
314 return self._manifest.dirs()
309 return self._manifest.dirs()
315
310
316 def hasdir(self, dir):
311 def hasdir(self, dir):
317 return self._manifest.hasdir(dir)
312 return self._manifest.hasdir(dir)
318
313
319 def status(self, other=None, match=None, listignored=False,
314 def status(self, other=None, match=None, listignored=False,
320 listclean=False, listunknown=False, listsubrepos=False):
315 listclean=False, listunknown=False, listsubrepos=False):
321 """return status of files between two nodes or node and working
316 """return status of files between two nodes or node and working
322 directory.
317 directory.
323
318
324 If other is None, compare this node with working directory.
319 If other is None, compare this node with working directory.
325
320
326 returns (modified, added, removed, deleted, unknown, ignored, clean)
321 returns (modified, added, removed, deleted, unknown, ignored, clean)
327 """
322 """
328
323
329 ctx1 = self
324 ctx1 = self
330 ctx2 = self._repo[other]
325 ctx2 = self._repo[other]
331
326
332 # This next code block is, admittedly, fragile logic that tests for
327 # This next code block is, admittedly, fragile logic that tests for
333 # reversing the contexts and wouldn't need to exist if it weren't for
328 # reversing the contexts and wouldn't need to exist if it weren't for
334 # the fast (and common) code path of comparing the working directory
329 # the fast (and common) code path of comparing the working directory
335 # with its first parent.
330 # with its first parent.
336 #
331 #
337 # What we're aiming for here is the ability to call:
332 # What we're aiming for here is the ability to call:
338 #
333 #
339 # workingctx.status(parentctx)
334 # workingctx.status(parentctx)
340 #
335 #
341 # If we always built the manifest for each context and compared those,
336 # If we always built the manifest for each context and compared those,
342 # then we'd be done. But the special case of the above call means we
337 # then we'd be done. But the special case of the above call means we
343 # just copy the manifest of the parent.
338 # just copy the manifest of the parent.
344 reversed = False
339 reversed = False
345 if (not isinstance(ctx1, changectx)
340 if (not isinstance(ctx1, changectx)
346 and isinstance(ctx2, changectx)):
341 and isinstance(ctx2, changectx)):
347 reversed = True
342 reversed = True
348 ctx1, ctx2 = ctx2, ctx1
343 ctx1, ctx2 = ctx2, ctx1
349
344
350 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
345 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
351 match = ctx2._matchstatus(ctx1, match)
346 match = ctx2._matchstatus(ctx1, match)
352 r = scmutil.status([], [], [], [], [], [], [])
347 r = scmutil.status([], [], [], [], [], [], [])
353 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
348 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
354 listunknown)
349 listunknown)
355
350
356 if reversed:
351 if reversed:
357 # Reverse added and removed. Clear deleted, unknown and ignored as
352 # Reverse added and removed. Clear deleted, unknown and ignored as
358 # these make no sense to reverse.
353 # these make no sense to reverse.
359 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
354 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
360 r.clean)
355 r.clean)
361
356
362 if listsubrepos:
357 if listsubrepos:
363 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
358 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
364 try:
359 try:
365 rev2 = ctx2.subrev(subpath)
360 rev2 = ctx2.subrev(subpath)
366 except KeyError:
361 except KeyError:
367 # A subrepo that existed in node1 was deleted between
362 # A subrepo that existed in node1 was deleted between
368 # node1 and node2 (inclusive). Thus, ctx2's substate
363 # node1 and node2 (inclusive). Thus, ctx2's substate
369 # won't contain that subpath. The best we can do ignore it.
364 # won't contain that subpath. The best we can do ignore it.
370 rev2 = None
365 rev2 = None
371 submatch = matchmod.subdirmatcher(subpath, match)
366 submatch = matchmod.subdirmatcher(subpath, match)
372 s = sub.status(rev2, match=submatch, ignored=listignored,
367 s = sub.status(rev2, match=submatch, ignored=listignored,
373 clean=listclean, unknown=listunknown,
368 clean=listclean, unknown=listunknown,
374 listsubrepos=True)
369 listsubrepos=True)
375 for rfiles, sfiles in zip(r, s):
370 for rfiles, sfiles in zip(r, s):
376 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
371 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
377
372
378 for l in r:
373 for l in r:
379 l.sort()
374 l.sort()
380
375
381 return r
376 return r
382
377
383 def _filterederror(repo, changeid):
378 def _filterederror(repo, changeid):
384 """build an exception to be raised about a filtered changeid
379 """build an exception to be raised about a filtered changeid
385
380
386 This is extracted in a function to help extensions (eg: evolve) to
381 This is extracted in a function to help extensions (eg: evolve) to
387 experiment with various message variants."""
382 experiment with various message variants."""
388 if repo.filtername.startswith('visible'):
383 if repo.filtername.startswith('visible'):
389
384
390 # Check if the changeset is obsolete
385 # Check if the changeset is obsolete
391 unfilteredrepo = repo.unfiltered()
386 unfilteredrepo = repo.unfiltered()
392 ctx = unfilteredrepo[changeid]
387 ctx = unfilteredrepo[changeid]
393
388
394 # If the changeset is obsolete, enrich the message with the reason
389 # If the changeset is obsolete, enrich the message with the reason
395 # that made this changeset not visible
390 # that made this changeset not visible
396 if ctx.obsolete():
391 if ctx.obsolete():
397 msg = obsutil._getfilteredreason(repo, changeid, ctx)
392 msg = obsutil._getfilteredreason(repo, changeid, ctx)
398 else:
393 else:
399 msg = _("hidden revision '%s'") % changeid
394 msg = _("hidden revision '%s'") % changeid
400
395
401 hint = _('use --hidden to access hidden revisions')
396 hint = _('use --hidden to access hidden revisions')
402
397
403 return error.FilteredRepoLookupError(msg, hint=hint)
398 return error.FilteredRepoLookupError(msg, hint=hint)
404 msg = _("filtered revision '%s' (not in '%s' subset)")
399 msg = _("filtered revision '%s' (not in '%s' subset)")
405 msg %= (changeid, repo.filtername)
400 msg %= (changeid, repo.filtername)
406 return error.FilteredRepoLookupError(msg)
401 return error.FilteredRepoLookupError(msg)
407
402
408 class changectx(basectx):
403 class changectx(basectx):
409 """A changecontext object makes access to data related to a particular
404 """A changecontext object makes access to data related to a particular
410 changeset convenient. It represents a read-only context already present in
405 changeset convenient. It represents a read-only context already present in
411 the repo."""
406 the repo."""
412 def __init__(self, repo, changeid='.'):
407 def __init__(self, repo, changeid='.'):
413 """changeid is a revision number, node, or tag"""
408 """changeid is a revision number, node, or tag"""
414
409
415 # since basectx.__new__ already took care of copying the object, we
416 # don't need to do anything in __init__, so we just exit here
417 if isinstance(changeid, basectx):
418 return
419
420 if changeid == '':
410 if changeid == '':
421 changeid = '.'
411 changeid = '.'
422 self._repo = repo
412 self._repo = repo
423
413
424 try:
414 try:
425 if isinstance(changeid, int):
415 if isinstance(changeid, int):
426 self._node = repo.changelog.node(changeid)
416 self._node = repo.changelog.node(changeid)
427 self._rev = changeid
417 self._rev = changeid
428 return
418 return
429 if not pycompat.ispy3 and isinstance(changeid, long):
419 if not pycompat.ispy3 and isinstance(changeid, long):
430 changeid = "%d" % changeid
420 changeid = "%d" % changeid
431 if changeid == 'null':
421 if changeid == 'null':
432 self._node = nullid
422 self._node = nullid
433 self._rev = nullrev
423 self._rev = nullrev
434 return
424 return
435 if changeid == 'tip':
425 if changeid == 'tip':
436 self._node = repo.changelog.tip()
426 self._node = repo.changelog.tip()
437 self._rev = repo.changelog.rev(self._node)
427 self._rev = repo.changelog.rev(self._node)
438 return
428 return
439 if (changeid == '.'
429 if (changeid == '.'
440 or repo.local() and changeid == repo.dirstate.p1()):
430 or repo.local() and changeid == repo.dirstate.p1()):
441 # this is a hack to delay/avoid loading obsmarkers
431 # this is a hack to delay/avoid loading obsmarkers
442 # when we know that '.' won't be hidden
432 # when we know that '.' won't be hidden
443 self._node = repo.dirstate.p1()
433 self._node = repo.dirstate.p1()
444 self._rev = repo.unfiltered().changelog.rev(self._node)
434 self._rev = repo.unfiltered().changelog.rev(self._node)
445 return
435 return
446 if len(changeid) == 20:
436 if len(changeid) == 20:
447 try:
437 try:
448 self._node = changeid
438 self._node = changeid
449 self._rev = repo.changelog.rev(changeid)
439 self._rev = repo.changelog.rev(changeid)
450 return
440 return
451 except error.FilteredRepoLookupError:
441 except error.FilteredRepoLookupError:
452 raise
442 raise
453 except LookupError:
443 except LookupError:
454 pass
444 pass
455
445
456 try:
446 try:
457 r = int(changeid)
447 r = int(changeid)
458 if '%d' % r != changeid:
448 if '%d' % r != changeid:
459 raise ValueError
449 raise ValueError
460 l = len(repo.changelog)
450 l = len(repo.changelog)
461 if r < 0:
451 if r < 0:
462 r += l
452 r += l
463 if r < 0 or r >= l and r != wdirrev:
453 if r < 0 or r >= l and r != wdirrev:
464 raise ValueError
454 raise ValueError
465 self._rev = r
455 self._rev = r
466 self._node = repo.changelog.node(r)
456 self._node = repo.changelog.node(r)
467 return
457 return
468 except error.FilteredIndexError:
458 except error.FilteredIndexError:
469 raise
459 raise
470 except (ValueError, OverflowError, IndexError):
460 except (ValueError, OverflowError, IndexError):
471 pass
461 pass
472
462
473 if len(changeid) == 40:
463 if len(changeid) == 40:
474 try:
464 try:
475 self._node = bin(changeid)
465 self._node = bin(changeid)
476 self._rev = repo.changelog.rev(self._node)
466 self._rev = repo.changelog.rev(self._node)
477 return
467 return
478 except error.FilteredLookupError:
468 except error.FilteredLookupError:
479 raise
469 raise
480 except (TypeError, LookupError):
470 except (TypeError, LookupError):
481 pass
471 pass
482
472
483 # lookup bookmarks through the name interface
473 # lookup bookmarks through the name interface
484 try:
474 try:
485 self._node = repo.names.singlenode(repo, changeid)
475 self._node = repo.names.singlenode(repo, changeid)
486 self._rev = repo.changelog.rev(self._node)
476 self._rev = repo.changelog.rev(self._node)
487 return
477 return
488 except KeyError:
478 except KeyError:
489 pass
479 pass
490 except error.FilteredRepoLookupError:
480 except error.FilteredRepoLookupError:
491 raise
481 raise
492 except error.RepoLookupError:
482 except error.RepoLookupError:
493 pass
483 pass
494
484
495 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 self._node = repo.unfiltered().changelog._partialmatch(changeid)
496 if self._node is not None:
486 if self._node is not None:
497 self._rev = repo.changelog.rev(self._node)
487 self._rev = repo.changelog.rev(self._node)
498 return
488 return
499
489
500 # lookup failed
490 # lookup failed
501 # check if it might have come from damaged dirstate
491 # check if it might have come from damaged dirstate
502 #
492 #
503 # XXX we could avoid the unfiltered if we had a recognizable
493 # XXX we could avoid the unfiltered if we had a recognizable
504 # exception for filtered changeset access
494 # exception for filtered changeset access
505 if (repo.local()
495 if (repo.local()
506 and changeid in repo.unfiltered().dirstate.parents()):
496 and changeid in repo.unfiltered().dirstate.parents()):
507 msg = _("working directory has unknown parent '%s'!")
497 msg = _("working directory has unknown parent '%s'!")
508 raise error.Abort(msg % short(changeid))
498 raise error.Abort(msg % short(changeid))
509 try:
499 try:
510 if len(changeid) == 20 and nonascii(changeid):
500 if len(changeid) == 20 and nonascii(changeid):
511 changeid = hex(changeid)
501 changeid = hex(changeid)
512 except TypeError:
502 except TypeError:
513 pass
503 pass
514 except (error.FilteredIndexError, error.FilteredLookupError,
504 except (error.FilteredIndexError, error.FilteredLookupError,
515 error.FilteredRepoLookupError):
505 error.FilteredRepoLookupError):
516 raise _filterederror(repo, changeid)
506 raise _filterederror(repo, changeid)
517 except IndexError:
507 except IndexError:
518 pass
508 pass
519 raise error.RepoLookupError(
509 raise error.RepoLookupError(
520 _("unknown revision '%s'") % changeid)
510 _("unknown revision '%s'") % changeid)
521
511
522 def __hash__(self):
512 def __hash__(self):
523 try:
513 try:
524 return hash(self._rev)
514 return hash(self._rev)
525 except AttributeError:
515 except AttributeError:
526 return id(self)
516 return id(self)
527
517
528 def __nonzero__(self):
518 def __nonzero__(self):
529 return self._rev != nullrev
519 return self._rev != nullrev
530
520
531 __bool__ = __nonzero__
521 __bool__ = __nonzero__
532
522
533 @propertycache
523 @propertycache
534 def _changeset(self):
524 def _changeset(self):
535 return self._repo.changelog.changelogrevision(self.rev())
525 return self._repo.changelog.changelogrevision(self.rev())
536
526
537 @propertycache
527 @propertycache
538 def _manifest(self):
528 def _manifest(self):
539 return self._manifestctx.read()
529 return self._manifestctx.read()
540
530
541 @property
531 @property
542 def _manifestctx(self):
532 def _manifestctx(self):
543 return self._repo.manifestlog[self._changeset.manifest]
533 return self._repo.manifestlog[self._changeset.manifest]
544
534
545 @propertycache
535 @propertycache
546 def _manifestdelta(self):
536 def _manifestdelta(self):
547 return self._manifestctx.readdelta()
537 return self._manifestctx.readdelta()
548
538
549 @propertycache
539 @propertycache
550 def _parents(self):
540 def _parents(self):
551 repo = self._repo
541 repo = self._repo
552 p1, p2 = repo.changelog.parentrevs(self._rev)
542 p1, p2 = repo.changelog.parentrevs(self._rev)
553 if p2 == nullrev:
543 if p2 == nullrev:
554 return [changectx(repo, p1)]
544 return [changectx(repo, p1)]
555 return [changectx(repo, p1), changectx(repo, p2)]
545 return [changectx(repo, p1), changectx(repo, p2)]
556
546
557 def changeset(self):
547 def changeset(self):
558 c = self._changeset
548 c = self._changeset
559 return (
549 return (
560 c.manifest,
550 c.manifest,
561 c.user,
551 c.user,
562 c.date,
552 c.date,
563 c.files,
553 c.files,
564 c.description,
554 c.description,
565 c.extra,
555 c.extra,
566 )
556 )
567 def manifestnode(self):
557 def manifestnode(self):
568 return self._changeset.manifest
558 return self._changeset.manifest
569
559
570 def user(self):
560 def user(self):
571 return self._changeset.user
561 return self._changeset.user
572 def date(self):
562 def date(self):
573 return self._changeset.date
563 return self._changeset.date
574 def files(self):
564 def files(self):
575 return self._changeset.files
565 return self._changeset.files
576 def description(self):
566 def description(self):
577 return self._changeset.description
567 return self._changeset.description
578 def branch(self):
568 def branch(self):
579 return encoding.tolocal(self._changeset.extra.get("branch"))
569 return encoding.tolocal(self._changeset.extra.get("branch"))
580 def closesbranch(self):
570 def closesbranch(self):
581 return 'close' in self._changeset.extra
571 return 'close' in self._changeset.extra
582 def extra(self):
572 def extra(self):
583 """Return a dict of extra information."""
573 """Return a dict of extra information."""
584 return self._changeset.extra
574 return self._changeset.extra
585 def tags(self):
575 def tags(self):
586 """Return a list of byte tag names"""
576 """Return a list of byte tag names"""
587 return self._repo.nodetags(self._node)
577 return self._repo.nodetags(self._node)
588 def bookmarks(self):
578 def bookmarks(self):
589 """Return a list of byte bookmark names."""
579 """Return a list of byte bookmark names."""
590 return self._repo.nodebookmarks(self._node)
580 return self._repo.nodebookmarks(self._node)
591 def phase(self):
581 def phase(self):
592 return self._repo._phasecache.phase(self._repo, self._rev)
582 return self._repo._phasecache.phase(self._repo, self._rev)
593 def hidden(self):
583 def hidden(self):
594 return self._rev in repoview.filterrevs(self._repo, 'visible')
584 return self._rev in repoview.filterrevs(self._repo, 'visible')
595
585
596 def isinmemory(self):
586 def isinmemory(self):
597 return False
587 return False
598
588
599 def children(self):
589 def children(self):
600 """return list of changectx contexts for each child changeset.
590 """return list of changectx contexts for each child changeset.
601
591
602 This returns only the immediate child changesets. Use descendants() to
592 This returns only the immediate child changesets. Use descendants() to
603 recursively walk children.
593 recursively walk children.
604 """
594 """
605 c = self._repo.changelog.children(self._node)
595 c = self._repo.changelog.children(self._node)
606 return [changectx(self._repo, x) for x in c]
596 return [changectx(self._repo, x) for x in c]
607
597
608 def ancestors(self):
598 def ancestors(self):
609 for a in self._repo.changelog.ancestors([self._rev]):
599 for a in self._repo.changelog.ancestors([self._rev]):
610 yield changectx(self._repo, a)
600 yield changectx(self._repo, a)
611
601
612 def descendants(self):
602 def descendants(self):
613 """Recursively yield all children of the changeset.
603 """Recursively yield all children of the changeset.
614
604
615 For just the immediate children, use children()
605 For just the immediate children, use children()
616 """
606 """
617 for d in self._repo.changelog.descendants([self._rev]):
607 for d in self._repo.changelog.descendants([self._rev]):
618 yield changectx(self._repo, d)
608 yield changectx(self._repo, d)
619
609
620 def filectx(self, path, fileid=None, filelog=None):
610 def filectx(self, path, fileid=None, filelog=None):
621 """get a file context from this changeset"""
611 """get a file context from this changeset"""
622 if fileid is None:
612 if fileid is None:
623 fileid = self.filenode(path)
613 fileid = self.filenode(path)
624 return filectx(self._repo, path, fileid=fileid,
614 return filectx(self._repo, path, fileid=fileid,
625 changectx=self, filelog=filelog)
615 changectx=self, filelog=filelog)
626
616
627 def ancestor(self, c2, warn=False):
617 def ancestor(self, c2, warn=False):
628 """return the "best" ancestor context of self and c2
618 """return the "best" ancestor context of self and c2
629
619
630 If there are multiple candidates, it will show a message and check
620 If there are multiple candidates, it will show a message and check
631 merge.preferancestor configuration before falling back to the
621 merge.preferancestor configuration before falling back to the
632 revlog ancestor."""
622 revlog ancestor."""
633 # deal with workingctxs
623 # deal with workingctxs
634 n2 = c2._node
624 n2 = c2._node
635 if n2 is None:
625 if n2 is None:
636 n2 = c2._parents[0]._node
626 n2 = c2._parents[0]._node
637 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
627 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
638 if not cahs:
628 if not cahs:
639 anc = nullid
629 anc = nullid
640 elif len(cahs) == 1:
630 elif len(cahs) == 1:
641 anc = cahs[0]
631 anc = cahs[0]
642 else:
632 else:
643 # experimental config: merge.preferancestor
633 # experimental config: merge.preferancestor
644 for r in self._repo.ui.configlist('merge', 'preferancestor'):
634 for r in self._repo.ui.configlist('merge', 'preferancestor'):
645 try:
635 try:
646 ctx = changectx(self._repo, r)
636 ctx = changectx(self._repo, r)
647 except error.RepoLookupError:
637 except error.RepoLookupError:
648 continue
638 continue
649 anc = ctx.node()
639 anc = ctx.node()
650 if anc in cahs:
640 if anc in cahs:
651 break
641 break
652 else:
642 else:
653 anc = self._repo.changelog.ancestor(self._node, n2)
643 anc = self._repo.changelog.ancestor(self._node, n2)
654 if warn:
644 if warn:
655 self._repo.ui.status(
645 self._repo.ui.status(
656 (_("note: using %s as ancestor of %s and %s\n") %
646 (_("note: using %s as ancestor of %s and %s\n") %
657 (short(anc), short(self._node), short(n2))) +
647 (short(anc), short(self._node), short(n2))) +
658 ''.join(_(" alternatively, use --config "
648 ''.join(_(" alternatively, use --config "
659 "merge.preferancestor=%s\n") %
649 "merge.preferancestor=%s\n") %
660 short(n) for n in sorted(cahs) if n != anc))
650 short(n) for n in sorted(cahs) if n != anc))
661 return changectx(self._repo, anc)
651 return changectx(self._repo, anc)
662
652
663 def descendant(self, other):
653 def descendant(self, other):
664 """True if other is descendant of this changeset"""
654 """True if other is descendant of this changeset"""
665 return self._repo.changelog.descendant(self._rev, other._rev)
655 return self._repo.changelog.descendant(self._rev, other._rev)
666
656
667 def walk(self, match):
657 def walk(self, match):
668 '''Generates matching file names.'''
658 '''Generates matching file names.'''
669
659
670 # Wrap match.bad method to have message with nodeid
660 # Wrap match.bad method to have message with nodeid
671 def bad(fn, msg):
661 def bad(fn, msg):
672 # The manifest doesn't know about subrepos, so don't complain about
662 # The manifest doesn't know about subrepos, so don't complain about
673 # paths into valid subrepos.
663 # paths into valid subrepos.
674 if any(fn == s or fn.startswith(s + '/')
664 if any(fn == s or fn.startswith(s + '/')
675 for s in self.substate):
665 for s in self.substate):
676 return
666 return
677 match.bad(fn, _('no such file in rev %s') % self)
667 match.bad(fn, _('no such file in rev %s') % self)
678
668
679 m = matchmod.badmatch(match, bad)
669 m = matchmod.badmatch(match, bad)
680 return self._manifest.walk(m)
670 return self._manifest.walk(m)
681
671
682 def matches(self, match):
672 def matches(self, match):
683 return self.walk(match)
673 return self.walk(match)
684
674
685 class basefilectx(object):
675 class basefilectx(object):
686 """A filecontext object represents the common logic for its children:
676 """A filecontext object represents the common logic for its children:
687 filectx: read-only access to a filerevision that is already present
677 filectx: read-only access to a filerevision that is already present
688 in the repo,
678 in the repo,
689 workingfilectx: a filecontext that represents files from the working
679 workingfilectx: a filecontext that represents files from the working
690 directory,
680 directory,
691 memfilectx: a filecontext that represents files in-memory,
681 memfilectx: a filecontext that represents files in-memory,
692 overlayfilectx: duplicate another filecontext with some fields overridden.
682 overlayfilectx: duplicate another filecontext with some fields overridden.
693 """
683 """
694 @propertycache
684 @propertycache
695 def _filelog(self):
685 def _filelog(self):
696 return self._repo.file(self._path)
686 return self._repo.file(self._path)
697
687
698 @propertycache
688 @propertycache
699 def _changeid(self):
689 def _changeid(self):
700 if r'_changeid' in self.__dict__:
690 if r'_changeid' in self.__dict__:
701 return self._changeid
691 return self._changeid
702 elif r'_changectx' in self.__dict__:
692 elif r'_changectx' in self.__dict__:
703 return self._changectx.rev()
693 return self._changectx.rev()
704 elif r'_descendantrev' in self.__dict__:
694 elif r'_descendantrev' in self.__dict__:
705 # this file context was created from a revision with a known
695 # this file context was created from a revision with a known
706 # descendant, we can (lazily) correct for linkrev aliases
696 # descendant, we can (lazily) correct for linkrev aliases
707 return self._adjustlinkrev(self._descendantrev)
697 return self._adjustlinkrev(self._descendantrev)
708 else:
698 else:
709 return self._filelog.linkrev(self._filerev)
699 return self._filelog.linkrev(self._filerev)
710
700
711 @propertycache
701 @propertycache
712 def _filenode(self):
702 def _filenode(self):
713 if r'_fileid' in self.__dict__:
703 if r'_fileid' in self.__dict__:
714 return self._filelog.lookup(self._fileid)
704 return self._filelog.lookup(self._fileid)
715 else:
705 else:
716 return self._changectx.filenode(self._path)
706 return self._changectx.filenode(self._path)
717
707
718 @propertycache
708 @propertycache
719 def _filerev(self):
709 def _filerev(self):
720 return self._filelog.rev(self._filenode)
710 return self._filelog.rev(self._filenode)
721
711
722 @propertycache
712 @propertycache
723 def _repopath(self):
713 def _repopath(self):
724 return self._path
714 return self._path
725
715
726 def __nonzero__(self):
716 def __nonzero__(self):
727 try:
717 try:
728 self._filenode
718 self._filenode
729 return True
719 return True
730 except error.LookupError:
720 except error.LookupError:
731 # file is missing
721 # file is missing
732 return False
722 return False
733
723
734 __bool__ = __nonzero__
724 __bool__ = __nonzero__
735
725
736 def __bytes__(self):
726 def __bytes__(self):
737 try:
727 try:
738 return "%s@%s" % (self.path(), self._changectx)
728 return "%s@%s" % (self.path(), self._changectx)
739 except error.LookupError:
729 except error.LookupError:
740 return "%s@???" % self.path()
730 return "%s@???" % self.path()
741
731
742 __str__ = encoding.strmethod(__bytes__)
732 __str__ = encoding.strmethod(__bytes__)
743
733
744 def __repr__(self):
734 def __repr__(self):
745 return r"<%s %s>" % (type(self).__name__, str(self))
735 return r"<%s %s>" % (type(self).__name__, str(self))
746
736
747 def __hash__(self):
737 def __hash__(self):
748 try:
738 try:
749 return hash((self._path, self._filenode))
739 return hash((self._path, self._filenode))
750 except AttributeError:
740 except AttributeError:
751 return id(self)
741 return id(self)
752
742
753 def __eq__(self, other):
743 def __eq__(self, other):
754 try:
744 try:
755 return (type(self) == type(other) and self._path == other._path
745 return (type(self) == type(other) and self._path == other._path
756 and self._filenode == other._filenode)
746 and self._filenode == other._filenode)
757 except AttributeError:
747 except AttributeError:
758 return False
748 return False
759
749
760 def __ne__(self, other):
750 def __ne__(self, other):
761 return not (self == other)
751 return not (self == other)
762
752
763 def filerev(self):
753 def filerev(self):
764 return self._filerev
754 return self._filerev
765 def filenode(self):
755 def filenode(self):
766 return self._filenode
756 return self._filenode
767 @propertycache
757 @propertycache
768 def _flags(self):
758 def _flags(self):
769 return self._changectx.flags(self._path)
759 return self._changectx.flags(self._path)
770 def flags(self):
760 def flags(self):
771 return self._flags
761 return self._flags
772 def filelog(self):
762 def filelog(self):
773 return self._filelog
763 return self._filelog
774 def rev(self):
764 def rev(self):
775 return self._changeid
765 return self._changeid
776 def linkrev(self):
766 def linkrev(self):
777 return self._filelog.linkrev(self._filerev)
767 return self._filelog.linkrev(self._filerev)
778 def node(self):
768 def node(self):
779 return self._changectx.node()
769 return self._changectx.node()
780 def hex(self):
770 def hex(self):
781 return self._changectx.hex()
771 return self._changectx.hex()
782 def user(self):
772 def user(self):
783 return self._changectx.user()
773 return self._changectx.user()
784 def date(self):
774 def date(self):
785 return self._changectx.date()
775 return self._changectx.date()
786 def files(self):
776 def files(self):
787 return self._changectx.files()
777 return self._changectx.files()
788 def description(self):
778 def description(self):
789 return self._changectx.description()
779 return self._changectx.description()
790 def branch(self):
780 def branch(self):
791 return self._changectx.branch()
781 return self._changectx.branch()
792 def extra(self):
782 def extra(self):
793 return self._changectx.extra()
783 return self._changectx.extra()
794 def phase(self):
784 def phase(self):
795 return self._changectx.phase()
785 return self._changectx.phase()
796 def phasestr(self):
786 def phasestr(self):
797 return self._changectx.phasestr()
787 return self._changectx.phasestr()
798 def obsolete(self):
788 def obsolete(self):
799 return self._changectx.obsolete()
789 return self._changectx.obsolete()
800 def instabilities(self):
790 def instabilities(self):
801 return self._changectx.instabilities()
791 return self._changectx.instabilities()
802 def manifest(self):
792 def manifest(self):
803 return self._changectx.manifest()
793 return self._changectx.manifest()
804 def changectx(self):
794 def changectx(self):
805 return self._changectx
795 return self._changectx
806 def renamed(self):
796 def renamed(self):
807 return self._copied
797 return self._copied
808 def repo(self):
798 def repo(self):
809 return self._repo
799 return self._repo
810 def size(self):
800 def size(self):
811 return len(self.data())
801 return len(self.data())
812
802
813 def path(self):
803 def path(self):
814 return self._path
804 return self._path
815
805
816 def isbinary(self):
806 def isbinary(self):
817 try:
807 try:
818 return stringutil.binary(self.data())
808 return stringutil.binary(self.data())
819 except IOError:
809 except IOError:
820 return False
810 return False
821 def isexec(self):
811 def isexec(self):
822 return 'x' in self.flags()
812 return 'x' in self.flags()
823 def islink(self):
813 def islink(self):
824 return 'l' in self.flags()
814 return 'l' in self.flags()
825
815
826 def isabsent(self):
816 def isabsent(self):
827 """whether this filectx represents a file not in self._changectx
817 """whether this filectx represents a file not in self._changectx
828
818
829 This is mainly for merge code to detect change/delete conflicts. This is
819 This is mainly for merge code to detect change/delete conflicts. This is
830 expected to be True for all subclasses of basectx."""
820 expected to be True for all subclasses of basectx."""
831 return False
821 return False
832
822
833 _customcmp = False
823 _customcmp = False
834 def cmp(self, fctx):
824 def cmp(self, fctx):
835 """compare with other file context
825 """compare with other file context
836
826
837 returns True if different than fctx.
827 returns True if different than fctx.
838 """
828 """
839 if fctx._customcmp:
829 if fctx._customcmp:
840 return fctx.cmp(self)
830 return fctx.cmp(self)
841
831
842 if (fctx._filenode is None
832 if (fctx._filenode is None
843 and (self._repo._encodefilterpats
833 and (self._repo._encodefilterpats
844 # if file data starts with '\1\n', empty metadata block is
834 # if file data starts with '\1\n', empty metadata block is
845 # prepended, which adds 4 bytes to filelog.size().
835 # prepended, which adds 4 bytes to filelog.size().
846 or self.size() - 4 == fctx.size())
836 or self.size() - 4 == fctx.size())
847 or self.size() == fctx.size()):
837 or self.size() == fctx.size()):
848 return self._filelog.cmp(self._filenode, fctx.data())
838 return self._filelog.cmp(self._filenode, fctx.data())
849
839
850 return True
840 return True
851
841
852 def _adjustlinkrev(self, srcrev, inclusive=False):
842 def _adjustlinkrev(self, srcrev, inclusive=False):
853 """return the first ancestor of <srcrev> introducing <fnode>
843 """return the first ancestor of <srcrev> introducing <fnode>
854
844
855 If the linkrev of the file revision does not point to an ancestor of
845 If the linkrev of the file revision does not point to an ancestor of
856 srcrev, we'll walk down the ancestors until we find one introducing
846 srcrev, we'll walk down the ancestors until we find one introducing
857 this file revision.
847 this file revision.
858
848
859 :srcrev: the changeset revision we search ancestors from
849 :srcrev: the changeset revision we search ancestors from
860 :inclusive: if true, the src revision will also be checked
850 :inclusive: if true, the src revision will also be checked
861 """
851 """
862 repo = self._repo
852 repo = self._repo
863 cl = repo.unfiltered().changelog
853 cl = repo.unfiltered().changelog
864 mfl = repo.manifestlog
854 mfl = repo.manifestlog
865 # fetch the linkrev
855 # fetch the linkrev
866 lkr = self.linkrev()
856 lkr = self.linkrev()
867 # hack to reuse ancestor computation when searching for renames
857 # hack to reuse ancestor computation when searching for renames
868 memberanc = getattr(self, '_ancestrycontext', None)
858 memberanc = getattr(self, '_ancestrycontext', None)
869 iteranc = None
859 iteranc = None
870 if srcrev is None:
860 if srcrev is None:
871 # wctx case, used by workingfilectx during mergecopy
861 # wctx case, used by workingfilectx during mergecopy
872 revs = [p.rev() for p in self._repo[None].parents()]
862 revs = [p.rev() for p in self._repo[None].parents()]
873 inclusive = True # we skipped the real (revless) source
863 inclusive = True # we skipped the real (revless) source
874 else:
864 else:
875 revs = [srcrev]
865 revs = [srcrev]
876 if memberanc is None:
866 if memberanc is None:
877 memberanc = iteranc = cl.ancestors(revs, lkr,
867 memberanc = iteranc = cl.ancestors(revs, lkr,
878 inclusive=inclusive)
868 inclusive=inclusive)
879 # check if this linkrev is an ancestor of srcrev
869 # check if this linkrev is an ancestor of srcrev
880 if lkr not in memberanc:
870 if lkr not in memberanc:
881 if iteranc is None:
871 if iteranc is None:
882 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
872 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
883 fnode = self._filenode
873 fnode = self._filenode
884 path = self._path
874 path = self._path
885 for a in iteranc:
875 for a in iteranc:
886 ac = cl.read(a) # get changeset data (we avoid object creation)
876 ac = cl.read(a) # get changeset data (we avoid object creation)
887 if path in ac[3]: # checking the 'files' field.
877 if path in ac[3]: # checking the 'files' field.
888 # The file has been touched, check if the content is
878 # The file has been touched, check if the content is
889 # similar to the one we search for.
879 # similar to the one we search for.
890 if fnode == mfl[ac[0]].readfast().get(path):
880 if fnode == mfl[ac[0]].readfast().get(path):
891 return a
881 return a
892 # In theory, we should never get out of that loop without a result.
882 # In theory, we should never get out of that loop without a result.
893 # But if manifest uses a buggy file revision (not children of the
883 # But if manifest uses a buggy file revision (not children of the
894 # one it replaces) we could. Such a buggy situation will likely
884 # one it replaces) we could. Such a buggy situation will likely
895 # result is crash somewhere else at to some point.
885 # result is crash somewhere else at to some point.
896 return lkr
886 return lkr
897
887
898 def introrev(self):
888 def introrev(self):
899 """return the rev of the changeset which introduced this file revision
889 """return the rev of the changeset which introduced this file revision
900
890
901 This method is different from linkrev because it take into account the
891 This method is different from linkrev because it take into account the
902 changeset the filectx was created from. It ensures the returned
892 changeset the filectx was created from. It ensures the returned
903 revision is one of its ancestors. This prevents bugs from
893 revision is one of its ancestors. This prevents bugs from
904 'linkrev-shadowing' when a file revision is used by multiple
894 'linkrev-shadowing' when a file revision is used by multiple
905 changesets.
895 changesets.
906 """
896 """
907 lkr = self.linkrev()
897 lkr = self.linkrev()
908 attrs = vars(self)
898 attrs = vars(self)
909 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
899 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
910 if noctx or self.rev() == lkr:
900 if noctx or self.rev() == lkr:
911 return self.linkrev()
901 return self.linkrev()
912 return self._adjustlinkrev(self.rev(), inclusive=True)
902 return self._adjustlinkrev(self.rev(), inclusive=True)
913
903
914 def introfilectx(self):
904 def introfilectx(self):
915 """Return filectx having identical contents, but pointing to the
905 """Return filectx having identical contents, but pointing to the
916 changeset revision where this filectx was introduced"""
906 changeset revision where this filectx was introduced"""
917 introrev = self.introrev()
907 introrev = self.introrev()
918 if self.rev() == introrev:
908 if self.rev() == introrev:
919 return self
909 return self
920 return self.filectx(self.filenode(), changeid=introrev)
910 return self.filectx(self.filenode(), changeid=introrev)
921
911
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            # empty.
            # - In case of merge, only one of the parents is nullid and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As nullids have always been filtered out in the previous list
            # comprehension, inserting at 0 will always result in "replacing
            # the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

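    # Illustrative use of annotate() (a sketch, not part of the original
    # module; assumes `repo` is an existing localrepo object):
    #
    #   for line in repo['tip']['some/file.txt'].annotate(follow=True):
    #       # line.fctx.rev() is the revision that last touched the line,
    #       # line.lineno its original line number, line.text its content.
    #       print(line.fctx.rev(), line.lineno, line.text)
    #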
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

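    # Common ways to obtain a filectx (a sketch, not part of the original
    # module; assumes `repo` is an existing localrepo object and `somenode`
    # a known file node):
    #
    #   fctx = repo['.']['path/to/file']                      # via changectx
    #   fctx = filectx(repo, 'path/to/file', changeid='.')
    #   fctx = filectx(repo, 'path/to/file', fileid=somenode)
    #
    # At least one of changeid, fileid or changectx must be supplied (see
    # the assertion above); everything else is filled in lazily by the
    # propertycaches.
    #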
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when fixes for the linkrev issues are on the
            # table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

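    # The branch taken above is controlled by the censor extension's
    # configuration; in an hgrc this looks like (sketch):
    #
    #   [censor]
    #   policy = ignore   # hand back empty data for censored file nodes
    #   # policy = abort  # the default: raise the Abort seen above
    #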
    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

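    # A worked example of the three-way flag merge implemented by func()
    # above, with fl1/fl2 the parents' flags and fla the ancestor's:
    #
    #   fl1='x', fl2='x', fla=''   -> 'x'  (parents agree)
    #   fl1='x', fl2='',  fla='x'  -> ''   (only p2 changed it: keep p2's)
    #   fl1='',  fl2='x', fla='x'  -> ''   (only p1 changed it: keep p1's)
    #   fl1='l', fl2='x', fla=''   -> ''   (real conflict: punt, no flags)
    #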
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

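    # For reference, the single-letter dirstate states consulted above and
    # throughout this class are: 'n' normal (tracked), 'a' added,
    # 'r' removed, 'm' merged and '?' untracked. __iter__ thus yields every
    # tracked file, and __contains__ rejects untracked ('?') and removed
    # ('r') entries.
    #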
    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

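    # Illustrative check (a sketch, not part of the original module;
    # assumes `repo` is an existing localrepo object):
    #
    #   wctx = repo[None]              # the workingctx
    #   if wctx.dirty(missing=True):   # also count deleted-but-tracked files
    #       raise error.Abort(_('uncommitted changes'))
    #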
    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

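    # Sketch of the calling convention (not part of the original module):
    # add() returns the subset of `list` it could not schedule for addition,
    # so a caller distinguishes successes as the complement:
    #
    #   rejected = repo[None].add(['newfile.txt'])
    #   added = [f for f in ['newfile.txt'] if f not in rejected]
    #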
    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case-insensitive filesystem needs magic to translate user
        # input to the actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by the
        manifest merge to see that files are different and by the update
        logic to avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

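    # Sketch of the resulting synthetic manifest relative to p1
    # (addednodeid and modifiednodeid are the sentinel node ids imported at
    # the top of this module):
    #
    #   man['brand-new-file'] == addednodeid       # added in the working dir
    #   man['edited-file'] == modifiednodeid       # differs from p1
    #   'removed-file' not in man                  # deleted or removed
    #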
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and `date` is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Note: the error message uses ``path``; ``self._path`` does
                # not exist on this context class.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g.,
        because it adds `a/foo`, but `a` is actually a file in the other
        commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # Note: the error message uses ``path``; ``self._path`` does
                # not exist on this context class.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
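
    # For instance (a sketch with hypothetical rev numbers), a caller merging
    # in memory might pin both parents explicitly:
    #
    #     mctx = wctx.tomemctx('in-memory merge', parents=(5, 7))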

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                    underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
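
# For example (a sketch; ``rawfilectx`` and ``datamap`` are illustrative
# names, not part of this module), the wrapper makes repeated lookups of the
# same path hit the cache:
#
#     def rawfilectx(repo, memctx, path):
#         return memfilectx(repo, memctx, path, datamap[path])
#
#     filectxfn = makecachingfilectxfn(rawfilectx)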

def memfilefromctx(ctx):
    """Given a context, return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
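
# Sketch (``oldctx`` is assumed to be an existing changectx): rebuilding a
# changeset in memory by reusing its file contents:
#
#     filectxfn = memfilefromctx(oldctx)
#     mctx = memctx(repo, (oldctx.p1().node(), None), oldctx.description(),
#                   oldctx.files(), filectxfn)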

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related files' data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the order of calls is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by a list whose length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters
    to override parts of it. This is useful when fctx.data() is expensive
    (e.g. the flag processor is expensive) and raw data, flags, and filenode
    could be reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is
        a function so that it can be evaluated lazily. path, flags, copied,
        ctx: None or an overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusability here.
        #
        # If ctx or copied is overridden to the same value as originalfctx's,
        # it is still considered reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        return self._datafunc()

class metadataonlyctx(committablectx):
    """Like memctx, but reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """
-    def __new__(cls, repo, originalctx, *args, **kwargs):
-        return super(metadataonlyctx, cls).__new__(cls, repo)
-
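
    # Sketch (``ctx`` is assumed to be the changeset being rewritten):
    # replacing only the commit message while reusing the manifest:
    #
    #     mctx = metadataonlyctx(repo, ctx, text='better message')
    #     newnode = mctx.commit()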
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and the parents' manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by a list whose length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
@@ -1,2333 +1,2335 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

76 # set of (path, vfs-location) tuples. vfs-location is:
76 # set of (path, vfs-location) tuples. vfs-location is:
77 # - 'plain' for vfs relative paths
77 # - 'plain' for vfs relative paths
78 # - '' for svfs relative paths
78 # - '' for svfs relative paths
79 _cachedfiles = set()
79 _cachedfiles = set()
80
80
81 class _basefilecache(scmutil.filecache):
81 class _basefilecache(scmutil.filecache):
82 """All filecache usage on repo are done for logic that should be unfiltered
82 """All filecache usage on repo are done for logic that should be unfiltered
83 """
83 """
84 def __get__(self, repo, type=None):
84 def __get__(self, repo, type=None):
85 if repo is None:
85 if repo is None:
86 return self
86 return self
87 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
87 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
88 def __set__(self, repo, value):
88 def __set__(self, repo, value):
89 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
89 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
90 def __delete__(self, repo):
90 def __delete__(self, repo):
91 return super(_basefilecache, self).__delete__(repo.unfiltered())
91 return super(_basefilecache, self).__delete__(repo.unfiltered())
92
92
93 class repofilecache(_basefilecache):
93 class repofilecache(_basefilecache):
94 """filecache for files in .hg but outside of .hg/store"""
94 """filecache for files in .hg but outside of .hg/store"""
95 def __init__(self, *paths):
95 def __init__(self, *paths):
96 super(repofilecache, self).__init__(*paths)
96 super(repofilecache, self).__init__(*paths)
97 for path in paths:
97 for path in paths:
98 _cachedfiles.add((path, 'plain'))
98 _cachedfiles.add((path, 'plain'))
99
99
100 def join(self, obj, fname):
100 def join(self, obj, fname):
101 return obj.vfs.join(fname)
101 return obj.vfs.join(fname)
102
102
103 class storecache(_basefilecache):
103 class storecache(_basefilecache):
104 """filecache for files in the store"""
104 """filecache for files in the store"""
105 def __init__(self, *paths):
105 def __init__(self, *paths):
106 super(storecache, self).__init__(*paths)
106 super(storecache, self).__init__(*paths)
107 for path in paths:
107 for path in paths:
108 _cachedfiles.add((path, ''))
108 _cachedfiles.add((path, ''))
109
109
110 def join(self, obj, fname):
110 def join(self, obj, fname):
111 return obj.sjoin(fname)
111 return obj.sjoin(fname)
112
112
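For orientation, a hedged sketch of how these two decorators are applied in practice; both property bodies below mirror real definitions that appear further down in this file, while the subclass name is illustrative only:

    class examplerepo(localrepository):
        @repofilecache('bookmarks', 'bookmarks.current')
        def _bookmarks(self):
            # re-read whenever .hg/bookmarks or .hg/bookmarks.current
            # changes on disk (vfs-location 'plain')
            return bookmarks.bmstore(self)

        @storecache('00changelog.i')
        def changelog(self):
            # re-read whenever .hg/store/00changelog.i changes
            # (vfs-location '')
            return changelog.changelog(self.svfs)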
113 def isfilecached(repo, name):
113 def isfilecached(repo, name):
114 """check if a repo has already cached "name" filecache-ed property
114 """check if a repo has already cached "name" filecache-ed property
115
115
116 This returns (cachedobj-or-None, iscached) tuple.
116 This returns (cachedobj-or-None, iscached) tuple.
117 """
117 """
118 cacheentry = repo.unfiltered()._filecache.get(name, None)
118 cacheentry = repo.unfiltered()._filecache.get(name, None)
119 if not cacheentry:
119 if not cacheentry:
120 return None, False
120 return None, False
121 return cacheentry.obj, True
121 return cacheentry.obj, True
122
122
123 class unfilteredpropertycache(util.propertycache):
123 class unfilteredpropertycache(util.propertycache):
124 """propertycache that apply to unfiltered repo only"""
124 """propertycache that apply to unfiltered repo only"""
125
125
126 def __get__(self, repo, type=None):
126 def __get__(self, repo, type=None):
127 unfi = repo.unfiltered()
127 unfi = repo.unfiltered()
128 if unfi is repo:
128 if unfi is repo:
129 return super(unfilteredpropertycache, self).__get__(unfi)
129 return super(unfilteredpropertycache, self).__get__(unfi)
130 return getattr(unfi, self.name)
130 return getattr(unfi, self.name)
131
131
132 class filteredpropertycache(util.propertycache):
132 class filteredpropertycache(util.propertycache):
133 """propertycache that must take filtering in account"""
133 """propertycache that must take filtering in account"""
134
134
135 def cachevalue(self, obj, value):
135 def cachevalue(self, obj, value):
136 object.__setattr__(obj, self.name, value)
136 object.__setattr__(obj, self.name, value)
137
137
138
138
139 def hasunfilteredcache(repo, name):
139 def hasunfilteredcache(repo, name):
140 """check if a repo has an unfilteredpropertycache value for <name>"""
140 """check if a repo has an unfilteredpropertycache value for <name>"""
141 return name in vars(repo.unfiltered())
141 return name in vars(repo.unfiltered())
142
142
143 def unfilteredmethod(orig):
143 def unfilteredmethod(orig):
144 """decorate method that always need to be run on unfiltered version"""
144 """decorate method that always need to be run on unfiltered version"""
145 def wrapper(repo, *args, **kwargs):
145 def wrapper(repo, *args, **kwargs):
146 return orig(repo.unfiltered(), *args, **kwargs)
146 return orig(repo.unfiltered(), *args, **kwargs)
147 return wrapper
147 return wrapper
148
148
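A short usage sketch for the decorator above; 'mymethod' is a hypothetical name, not part of this module:

    class examplerepo(localrepository):
        @unfilteredmethod
        def mymethod(self):
            # here 'self' is always the unfiltered repository, even when
            # the caller invoked this on a filtered repoview
            return len(self.changelog)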
149 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
149 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
150 'unbundle'}
150 'unbundle'}
151 legacycaps = moderncaps.union({'changegroupsubset'})
151 legacycaps = moderncaps.union({'changegroupsubset'})
152
152
153 class localpeer(repository.peer):
153 class localpeer(repository.peer):
154 '''peer for a local repo; reflects only the most recent API'''
154 '''peer for a local repo; reflects only the most recent API'''
155
155
156 def __init__(self, repo, caps=None):
156 def __init__(self, repo, caps=None):
157 super(localpeer, self).__init__()
157 super(localpeer, self).__init__()
158
158
159 if caps is None:
159 if caps is None:
160 caps = moderncaps.copy()
160 caps = moderncaps.copy()
161 self._repo = repo.filtered('served')
161 self._repo = repo.filtered('served')
162 self._ui = repo.ui
162 self._ui = repo.ui
163 self._caps = repo._restrictcapabilities(caps)
163 self._caps = repo._restrictcapabilities(caps)
164
164
165 # Begin of _basepeer interface.
165 # Begin of _basepeer interface.
166
166
167 @util.propertycache
167 @util.propertycache
168 def ui(self):
168 def ui(self):
169 return self._ui
169 return self._ui
170
170
171 def url(self):
171 def url(self):
172 return self._repo.url()
172 return self._repo.url()
173
173
174 def local(self):
174 def local(self):
175 return self._repo
175 return self._repo
176
176
177 def peer(self):
177 def peer(self):
178 return self
178 return self
179
179
180 def canpush(self):
180 def canpush(self):
181 return True
181 return True
182
182
183 def close(self):
183 def close(self):
184 self._repo.close()
184 self._repo.close()
185
185
186 # End of _basepeer interface.
186 # End of _basepeer interface.
187
187
188 # Begin of _basewirecommands interface.
188 # Begin of _basewirecommands interface.
189
189
190 def branchmap(self):
190 def branchmap(self):
191 return self._repo.branchmap()
191 return self._repo.branchmap()
192
192
193 def capabilities(self):
193 def capabilities(self):
194 return self._caps
194 return self._caps
195
195
196 def debugwireargs(self, one, two, three=None, four=None, five=None):
196 def debugwireargs(self, one, two, three=None, four=None, five=None):
197 """Used to test argument passing over the wire"""
197 """Used to test argument passing over the wire"""
198 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
198 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
199 pycompat.bytestr(four),
199 pycompat.bytestr(four),
200 pycompat.bytestr(five))
200 pycompat.bytestr(five))
201
201
202 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
202 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
203 **kwargs):
203 **kwargs):
204 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
204 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
205 common=common, bundlecaps=bundlecaps,
205 common=common, bundlecaps=bundlecaps,
206 **kwargs)[1]
206 **kwargs)[1]
207 cb = util.chunkbuffer(chunks)
207 cb = util.chunkbuffer(chunks)
208
208
209 if exchange.bundle2requested(bundlecaps):
209 if exchange.bundle2requested(bundlecaps):
210 # When requesting a bundle2, getbundle returns a stream to make the
210 # When requesting a bundle2, getbundle returns a stream to make the
211 # wire level function happier. We need to build a proper object
211 # wire level function happier. We need to build a proper object
212 # from it in local peer.
212 # from it in local peer.
213 return bundle2.getunbundler(self.ui, cb)
213 return bundle2.getunbundler(self.ui, cb)
214 else:
214 else:
215 return changegroup.getunbundler('01', cb, None)
215 return changegroup.getunbundler('01', cb, None)
216
216
217 def heads(self):
217 def heads(self):
218 return self._repo.heads()
218 return self._repo.heads()
219
219
220 def known(self, nodes):
220 def known(self, nodes):
221 return self._repo.known(nodes)
221 return self._repo.known(nodes)
222
222
223 def listkeys(self, namespace):
223 def listkeys(self, namespace):
224 return self._repo.listkeys(namespace)
224 return self._repo.listkeys(namespace)
225
225
226 def lookup(self, key):
226 def lookup(self, key):
227 return self._repo.lookup(key)
227 return self._repo.lookup(key)
228
228
229 def pushkey(self, namespace, key, old, new):
229 def pushkey(self, namespace, key, old, new):
230 return self._repo.pushkey(namespace, key, old, new)
230 return self._repo.pushkey(namespace, key, old, new)
231
231
232 def stream_out(self):
232 def stream_out(self):
233 raise error.Abort(_('cannot perform stream clone against local '
233 raise error.Abort(_('cannot perform stream clone against local '
234 'peer'))
234 'peer'))
235
235
236 def unbundle(self, cg, heads, url):
236 def unbundle(self, cg, heads, url):
237 """apply a bundle on a repo
237 """apply a bundle on a repo
238
238
239 This function handles the repo locking itself."""
239 This function handles the repo locking itself."""
240 try:
240 try:
241 try:
241 try:
242 cg = exchange.readbundle(self.ui, cg, None)
242 cg = exchange.readbundle(self.ui, cg, None)
243 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
243 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
244 if util.safehasattr(ret, 'getchunks'):
244 if util.safehasattr(ret, 'getchunks'):
245 # This is a bundle20 object, turn it into an unbundler.
245 # This is a bundle20 object, turn it into an unbundler.
246 # This little dance should be dropped eventually when the
246 # This little dance should be dropped eventually when the
247 # API is finally improved.
247 # API is finally improved.
248 stream = util.chunkbuffer(ret.getchunks())
248 stream = util.chunkbuffer(ret.getchunks())
249 ret = bundle2.getunbundler(self.ui, stream)
249 ret = bundle2.getunbundler(self.ui, stream)
250 return ret
250 return ret
251 except Exception as exc:
251 except Exception as exc:
252 # If the exception contains output salvaged from a bundle2
252 # If the exception contains output salvaged from a bundle2
253 # reply, we need to make sure it is printed before continuing
253 # reply, we need to make sure it is printed before continuing
254 # to fail. So we build a bundle2 with such output and consume
254 # to fail. So we build a bundle2 with such output and consume
255 # it directly.
255 # it directly.
256 #
256 #
257 # This is not very elegant but allows a "simple" solution for
257 # This is not very elegant but allows a "simple" solution for
258 # issue4594
258 # issue4594
259 output = getattr(exc, '_bundle2salvagedoutput', ())
259 output = getattr(exc, '_bundle2salvagedoutput', ())
260 if output:
260 if output:
261 bundler = bundle2.bundle20(self._repo.ui)
261 bundler = bundle2.bundle20(self._repo.ui)
262 for out in output:
262 for out in output:
263 bundler.addpart(out)
263 bundler.addpart(out)
264 stream = util.chunkbuffer(bundler.getchunks())
264 stream = util.chunkbuffer(bundler.getchunks())
265 b = bundle2.getunbundler(self.ui, stream)
265 b = bundle2.getunbundler(self.ui, stream)
266 bundle2.processbundle(self._repo, b)
266 bundle2.processbundle(self._repo, b)
267 raise
267 raise
268 except error.PushRaced as exc:
268 except error.PushRaced as exc:
269 raise error.ResponseError(_('push failed:'),
269 raise error.ResponseError(_('push failed:'),
270 stringutil.forcebytestr(exc))
270 stringutil.forcebytestr(exc))
271
271
272 # End of _basewirecommands interface.
272 # End of _basewirecommands interface.
273
273
274 # Begin of peer interface.
274 # Begin of peer interface.
275
275
276 def iterbatch(self):
276 def iterbatch(self):
277 return peer.localiterbatcher(self)
277 return peer.localiterbatcher(self)
278
278
279 # End of peer interface.
279 # End of peer interface.
280
280
281 class locallegacypeer(repository.legacypeer, localpeer):
281 class locallegacypeer(repository.legacypeer, localpeer):
282 '''peer extension which implements legacy methods too; used for tests with
282 '''peer extension which implements legacy methods too; used for tests with
283 restricted capabilities'''
283 restricted capabilities'''
284
284
285 def __init__(self, repo):
285 def __init__(self, repo):
286 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
286 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
287
287
288 # Begin of baselegacywirecommands interface.
288 # Begin of baselegacywirecommands interface.
289
289
290 def between(self, pairs):
290 def between(self, pairs):
291 return self._repo.between(pairs)
291 return self._repo.between(pairs)
292
292
293 def branches(self, nodes):
293 def branches(self, nodes):
294 return self._repo.branches(nodes)
294 return self._repo.branches(nodes)
295
295
296 def changegroup(self, basenodes, source):
296 def changegroup(self, basenodes, source):
297 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
297 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
298 missingheads=self._repo.heads())
298 missingheads=self._repo.heads())
299 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
299 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
300
300
301 def changegroupsubset(self, bases, heads, source):
301 def changegroupsubset(self, bases, heads, source):
302 outgoing = discovery.outgoing(self._repo, missingroots=bases,
302 outgoing = discovery.outgoing(self._repo, missingroots=bases,
303 missingheads=heads)
303 missingheads=heads)
304 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
304 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
305
305
306 # End of baselegacywirecommands interface.
306 # End of baselegacywirecommands interface.
307
307
308 # Increment the sub-version when the revlog v2 format changes to lock out old
308 # Increment the sub-version when the revlog v2 format changes to lock out old
309 # clients.
309 # clients.
310 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
310 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
311
311
312 # Functions receiving (ui, features) that extensions can register to impact
312 # Functions receiving (ui, features) that extensions can register to impact
313 # the ability to load repositories with custom requirements. Only
313 # the ability to load repositories with custom requirements. Only
314 # functions defined in loaded extensions are called.
314 # functions defined in loaded extensions are called.
315 #
315 #
316 # The function receives a set of requirement strings that the repository
316 # The function receives a set of requirement strings that the repository
317 # is capable of opening. Functions will typically add elements to the
317 # is capable of opening. Functions will typically add elements to the
318 # set to reflect that the extension knows how to handle those requirements.
318 # set to reflect that the extension knows how to handle those requirements.
319 featuresetupfuncs = set()
319 featuresetupfuncs = set()
320
320
321 class localrepository(object):
321 class localrepository(object):
322
322
323 # obsolete experimental requirements:
323 # obsolete experimental requirements:
324 # - manifestv2: An experimental new manifest format that allowed
324 # - manifestv2: An experimental new manifest format that allowed
325 # for stem compression of long paths. Experiment ended up not
325 # for stem compression of long paths. Experiment ended up not
326 # being successful (repository sizes went up due to worse delta
326 # being successful (repository sizes went up due to worse delta
327 # chains), and the code was deleted in 4.6.
327 # chains), and the code was deleted in 4.6.
328 supportedformats = {
328 supportedformats = {
329 'revlogv1',
329 'revlogv1',
330 'generaldelta',
330 'generaldelta',
331 'treemanifest',
331 'treemanifest',
332 REVLOGV2_REQUIREMENT,
332 REVLOGV2_REQUIREMENT,
333 }
333 }
334 _basesupported = supportedformats | {
334 _basesupported = supportedformats | {
335 'store',
335 'store',
336 'fncache',
336 'fncache',
337 'shared',
337 'shared',
338 'relshared',
338 'relshared',
339 'dotencode',
339 'dotencode',
340 'exp-sparse',
340 'exp-sparse',
341 }
341 }
342 openerreqs = {
342 openerreqs = {
343 'revlogv1',
343 'revlogv1',
344 'generaldelta',
344 'generaldelta',
345 'treemanifest',
345 'treemanifest',
346 }
346 }
347
347
348 # list of prefixes for files which can be written without 'wlock'
348 # list of prefixes for files which can be written without 'wlock'
349 # Extensions should extend this list when needed
349 # Extensions should extend this list when needed
350 _wlockfreeprefix = {
350 _wlockfreeprefix = {
351 # We might consider requiring 'wlock' for the next
351 # We might consider requiring 'wlock' for the next
352 # two, but pretty much all the existing code assume
352 # two, but pretty much all the existing code assume
353 # wlock is not needed so we keep them excluded for
353 # wlock is not needed so we keep them excluded for
354 # now.
354 # now.
355 'hgrc',
355 'hgrc',
356 'requires',
356 'requires',
357 # XXX cache is a complicated business; someone
357 # XXX cache is a complicated business; someone
358 # should investigate this in depth at some point
358 # should investigate this in depth at some point
359 'cache/',
359 'cache/',
360 # XXX shouldn't be dirstate covered by the wlock?
360 # XXX shouldn't be dirstate covered by the wlock?
361 'dirstate',
361 'dirstate',
362 # XXX bisect was still a bit too messy at the time
362 # XXX bisect was still a bit too messy at the time
363 # this changeset was introduced. Someone should fix
363 # this changeset was introduced. Someone should fix
364 # the remaining bit and drop this line
364 # the remaining bit and drop this line
365 'bisect.state',
365 'bisect.state',
366 }
366 }
367
367
368 def __init__(self, baseui, path, create=False):
368 def __init__(self, baseui, path, create=False):
369 self.requirements = set()
369 self.requirements = set()
370 self.filtername = None
370 self.filtername = None
371 # wvfs: rooted at the repository root, used to access the working copy
371 # wvfs: rooted at the repository root, used to access the working copy
372 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
372 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
373 # vfs: rooted at .hg, used to access repo files outside of .hg/store
373 # vfs: rooted at .hg, used to access repo files outside of .hg/store
374 self.vfs = None
374 self.vfs = None
375 # svfs: usually rooted at .hg/store, used to access repository history
375 # svfs: usually rooted at .hg/store, used to access repository history
376 # If this is a shared repository, this vfs may point to another
376 # If this is a shared repository, this vfs may point to another
377 # repository's .hg/store directory.
377 # repository's .hg/store directory.
378 self.svfs = None
378 self.svfs = None
379 self.root = self.wvfs.base
379 self.root = self.wvfs.base
380 self.path = self.wvfs.join(".hg")
380 self.path = self.wvfs.join(".hg")
381 self.origroot = path
381 self.origroot = path
382 # This is only used by context.workingctx.match in order to
382 # This is only used by context.workingctx.match in order to
383 # detect files in subrepos.
383 # detect files in subrepos.
384 self.auditor = pathutil.pathauditor(
384 self.auditor = pathutil.pathauditor(
385 self.root, callback=self._checknested)
385 self.root, callback=self._checknested)
386 # This is only used by context.basectx.match in order to detect
386 # This is only used by context.basectx.match in order to detect
387 # files in subrepos.
387 # files in subrepos.
388 self.nofsauditor = pathutil.pathauditor(
388 self.nofsauditor = pathutil.pathauditor(
389 self.root, callback=self._checknested, realfs=False, cached=True)
389 self.root, callback=self._checknested, realfs=False, cached=True)
390 self.baseui = baseui
390 self.baseui = baseui
391 self.ui = baseui.copy()
391 self.ui = baseui.copy()
392 self.ui.copy = baseui.copy # prevent copying repo configuration
392 self.ui.copy = baseui.copy # prevent copying repo configuration
393 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
393 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
394 if (self.ui.configbool('devel', 'all-warnings') or
394 if (self.ui.configbool('devel', 'all-warnings') or
395 self.ui.configbool('devel', 'check-locks')):
395 self.ui.configbool('devel', 'check-locks')):
396 self.vfs.audit = self._getvfsward(self.vfs.audit)
396 self.vfs.audit = self._getvfsward(self.vfs.audit)
397 # A list of callbacks to shape the phase if no data were found.
397 # A list of callbacks to shape the phase if no data were found.
398 # Callbacks are in the form: func(repo, roots) --> processed root.
398 # Callbacks are in the form: func(repo, roots) --> processed root.
399 # This list is to be filled by extensions during repo setup
399 # This list is to be filled by extensions during repo setup
400 self._phasedefaults = []
400 self._phasedefaults = []
401 try:
401 try:
402 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
402 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
403 self._loadextensions()
403 self._loadextensions()
404 except IOError:
404 except IOError:
405 pass
405 pass
406
406
407 if featuresetupfuncs:
407 if featuresetupfuncs:
408 self.supported = set(self._basesupported) # use private copy
408 self.supported = set(self._basesupported) # use private copy
409 extmods = set(m.__name__ for n, m
409 extmods = set(m.__name__ for n, m
410 in extensions.extensions(self.ui))
410 in extensions.extensions(self.ui))
411 for setupfunc in featuresetupfuncs:
411 for setupfunc in featuresetupfuncs:
412 if setupfunc.__module__ in extmods:
412 if setupfunc.__module__ in extmods:
413 setupfunc(self.ui, self.supported)
413 setupfunc(self.ui, self.supported)
414 else:
414 else:
415 self.supported = self._basesupported
415 self.supported = self._basesupported
416 color.setup(self.ui)
416 color.setup(self.ui)
417
417
418 # Add compression engines.
418 # Add compression engines.
419 for name in util.compengines:
419 for name in util.compengines:
420 engine = util.compengines[name]
420 engine = util.compengines[name]
421 if engine.revlogheader():
421 if engine.revlogheader():
422 self.supported.add('exp-compression-%s' % name)
422 self.supported.add('exp-compression-%s' % name)
423
423
424 if not self.vfs.isdir():
424 if not self.vfs.isdir():
425 if create:
425 if create:
426 self.requirements = newreporequirements(self)
426 self.requirements = newreporequirements(self)
427
427
428 if not self.wvfs.exists():
428 if not self.wvfs.exists():
429 self.wvfs.makedirs()
429 self.wvfs.makedirs()
430 self.vfs.makedir(notindexed=True)
430 self.vfs.makedir(notindexed=True)
431
431
432 if 'store' in self.requirements:
432 if 'store' in self.requirements:
433 self.vfs.mkdir("store")
433 self.vfs.mkdir("store")
434
434
435 # create an invalid changelog
435 # create an invalid changelog
436 self.vfs.append(
436 self.vfs.append(
437 "00changelog.i",
437 "00changelog.i",
438 '\0\0\0\2' # represents revlogv2
438 '\0\0\0\2' # represents revlogv2
439 ' dummy changelog to prevent using the old repo layout'
439 ' dummy changelog to prevent using the old repo layout'
440 )
440 )
441 else:
441 else:
442 raise error.RepoError(_("repository %s not found") % path)
442 raise error.RepoError(_("repository %s not found") % path)
443 elif create:
443 elif create:
444 raise error.RepoError(_("repository %s already exists") % path)
444 raise error.RepoError(_("repository %s already exists") % path)
445 else:
445 else:
446 try:
446 try:
447 self.requirements = scmutil.readrequires(
447 self.requirements = scmutil.readrequires(
448 self.vfs, self.supported)
448 self.vfs, self.supported)
449 except IOError as inst:
449 except IOError as inst:
450 if inst.errno != errno.ENOENT:
450 if inst.errno != errno.ENOENT:
451 raise
451 raise
452
452
453 cachepath = self.vfs.join('cache')
453 cachepath = self.vfs.join('cache')
454 self.sharedpath = self.path
454 self.sharedpath = self.path
455 try:
455 try:
456 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
456 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
457 if 'relshared' in self.requirements:
457 if 'relshared' in self.requirements:
458 sharedpath = self.vfs.join(sharedpath)
458 sharedpath = self.vfs.join(sharedpath)
459 vfs = vfsmod.vfs(sharedpath, realpath=True)
459 vfs = vfsmod.vfs(sharedpath, realpath=True)
460 cachepath = vfs.join('cache')
460 cachepath = vfs.join('cache')
461 s = vfs.base
461 s = vfs.base
462 if not vfs.exists():
462 if not vfs.exists():
463 raise error.RepoError(
463 raise error.RepoError(
464 _('.hg/sharedpath points to nonexistent directory %s') % s)
464 _('.hg/sharedpath points to nonexistent directory %s') % s)
465 self.sharedpath = s
465 self.sharedpath = s
466 except IOError as inst:
466 except IOError as inst:
467 if inst.errno != errno.ENOENT:
467 if inst.errno != errno.ENOENT:
468 raise
468 raise
469
469
470 if 'exp-sparse' in self.requirements and not sparse.enabled:
470 if 'exp-sparse' in self.requirements and not sparse.enabled:
471 raise error.RepoError(_('repository is using sparse feature but '
471 raise error.RepoError(_('repository is using sparse feature but '
472 'sparse is not enabled; enable the '
472 'sparse is not enabled; enable the '
473 '"sparse" extensions to access'))
473 '"sparse" extensions to access'))
474
474
475 self.store = store.store(
475 self.store = store.store(
476 self.requirements, self.sharedpath,
476 self.requirements, self.sharedpath,
477 lambda base: vfsmod.vfs(base, cacheaudited=True))
477 lambda base: vfsmod.vfs(base, cacheaudited=True))
478 self.spath = self.store.path
478 self.spath = self.store.path
479 self.svfs = self.store.vfs
479 self.svfs = self.store.vfs
480 self.sjoin = self.store.join
480 self.sjoin = self.store.join
481 self.vfs.createmode = self.store.createmode
481 self.vfs.createmode = self.store.createmode
482 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
482 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
483 self.cachevfs.createmode = self.store.createmode
483 self.cachevfs.createmode = self.store.createmode
484 if (self.ui.configbool('devel', 'all-warnings') or
484 if (self.ui.configbool('devel', 'all-warnings') or
485 self.ui.configbool('devel', 'check-locks')):
485 self.ui.configbool('devel', 'check-locks')):
486 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
486 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
487 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
487 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
488 else: # standard vfs
488 else: # standard vfs
489 self.svfs.audit = self._getsvfsward(self.svfs.audit)
489 self.svfs.audit = self._getsvfsward(self.svfs.audit)
490 self._applyopenerreqs()
490 self._applyopenerreqs()
491 if create:
491 if create:
492 self._writerequirements()
492 self._writerequirements()
493
493
494 self._dirstatevalidatewarned = False
494 self._dirstatevalidatewarned = False
495
495
496 self._branchcaches = {}
496 self._branchcaches = {}
497 self._revbranchcache = None
497 self._revbranchcache = None
498 self._filterpats = {}
498 self._filterpats = {}
499 self._datafilters = {}
499 self._datafilters = {}
500 self._transref = self._lockref = self._wlockref = None
500 self._transref = self._lockref = self._wlockref = None
501
501
502 # A cache for various files under .hg/ that tracks file changes,
502 # A cache for various files under .hg/ that tracks file changes,
503 # (used by the filecache decorator)
503 # (used by the filecache decorator)
504 #
504 #
505 # Maps a property name to its util.filecacheentry
505 # Maps a property name to its util.filecacheentry
506 self._filecache = {}
506 self._filecache = {}
507
507
508 # hold sets of revisions to be filtered
508 # hold sets of revisions to be filtered
509 # should be cleared when something might have changed the filter value:
509 # should be cleared when something might have changed the filter value:
510 # - new changesets,
510 # - new changesets,
511 # - phase change,
511 # - phase change,
512 # - new obsolescence marker,
512 # - new obsolescence marker,
513 # - working directory parent change,
513 # - working directory parent change,
514 # - bookmark changes
514 # - bookmark changes
515 self.filteredrevcache = {}
515 self.filteredrevcache = {}
516
516
517 # post-dirstate-status hooks
517 # post-dirstate-status hooks
518 self._postdsstatus = []
518 self._postdsstatus = []
519
519
520 # generic mapping between names and nodes
520 # generic mapping between names and nodes
521 self.names = namespaces.namespaces()
521 self.names = namespaces.namespaces()
522
522
523 # Key to signature value.
523 # Key to signature value.
524 self._sparsesignaturecache = {}
524 self._sparsesignaturecache = {}
525 # Signature to cached matcher instance.
525 # Signature to cached matcher instance.
526 self._sparsematchercache = {}
526 self._sparsematchercache = {}
527
527
528 def _getvfsward(self, origfunc):
528 def _getvfsward(self, origfunc):
529 """build a ward for self.vfs"""
529 """build a ward for self.vfs"""
530 rref = weakref.ref(self)
530 rref = weakref.ref(self)
531 def checkvfs(path, mode=None):
531 def checkvfs(path, mode=None):
532 ret = origfunc(path, mode=mode)
532 ret = origfunc(path, mode=mode)
533 repo = rref()
533 repo = rref()
534 if (repo is None
534 if (repo is None
535 or not util.safehasattr(repo, '_wlockref')
535 or not util.safehasattr(repo, '_wlockref')
536 or not util.safehasattr(repo, '_lockref')):
536 or not util.safehasattr(repo, '_lockref')):
537 return
537 return
538 if mode in (None, 'r', 'rb'):
538 if mode in (None, 'r', 'rb'):
539 return
539 return
540 if path.startswith(repo.path):
540 if path.startswith(repo.path):
541 # truncate name relative to the repository (.hg)
541 # truncate name relative to the repository (.hg)
542 path = path[len(repo.path) + 1:]
542 path = path[len(repo.path) + 1:]
543 if path.startswith('cache/'):
543 if path.startswith('cache/'):
544 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
544 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
545 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
545 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
546 if path.startswith('journal.'):
546 if path.startswith('journal.'):
547 # journal is covered by 'lock'
547 # journal is covered by 'lock'
548 if repo._currentlock(repo._lockref) is None:
548 if repo._currentlock(repo._lockref) is None:
549 repo.ui.develwarn('write with no lock: "%s"' % path,
549 repo.ui.develwarn('write with no lock: "%s"' % path,
550 stacklevel=2, config='check-locks')
550 stacklevel=2, config='check-locks')
551 elif repo._currentlock(repo._wlockref) is None:
551 elif repo._currentlock(repo._wlockref) is None:
552 # rest of vfs files are covered by 'wlock'
552 # rest of vfs files are covered by 'wlock'
553 #
553 #
554 # exclude special files
554 # exclude special files
555 for prefix in self._wlockfreeprefix:
555 for prefix in self._wlockfreeprefix:
556 if path.startswith(prefix):
556 if path.startswith(prefix):
557 return
557 return
558 repo.ui.develwarn('write with no wlock: "%s"' % path,
558 repo.ui.develwarn('write with no wlock: "%s"' % path,
559 stacklevel=2, config='check-locks')
559 stacklevel=2, config='check-locks')
560 return ret
560 return ret
561 return checkvfs
561 return checkvfs
562
562
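Worth noting: checkvfs holds only a weak reference to the repo, so installing the ward on self.vfs.audit does not create a cycle that keeps the repository alive. A self-contained sketch of the same pattern (names here are illustrative, not Mercurial API):

    import weakref

    def makeward(owner, origfunc):
        oref = weakref.ref(owner)          # weak: the ward must not keep
        def ward(*args, **kwargs):         # 'owner' alive by itself
            ret = origfunc(*args, **kwargs)
            obj = oref()
            if obj is None:                # owner was garbage collected
                return ret
            # ... per-owner sanity checks would go here ...
            return ret
        return ward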
563 def _getsvfsward(self, origfunc):
563 def _getsvfsward(self, origfunc):
564 """build a ward for self.svfs"""
564 """build a ward for self.svfs"""
565 rref = weakref.ref(self)
565 rref = weakref.ref(self)
566 def checksvfs(path, mode=None):
566 def checksvfs(path, mode=None):
567 ret = origfunc(path, mode=mode)
567 ret = origfunc(path, mode=mode)
568 repo = rref()
568 repo = rref()
569 if repo is None or not util.safehasattr(repo, '_lockref'):
569 if repo is None or not util.safehasattr(repo, '_lockref'):
570 return
570 return
571 if mode in (None, 'r', 'rb'):
571 if mode in (None, 'r', 'rb'):
572 return
572 return
573 if path.startswith(repo.sharedpath):
573 if path.startswith(repo.sharedpath):
574 # truncate name relative to the repository (.hg)
574 # truncate name relative to the repository (.hg)
575 path = path[len(repo.sharedpath) + 1:]
575 path = path[len(repo.sharedpath) + 1:]
576 if repo._currentlock(repo._lockref) is None:
576 if repo._currentlock(repo._lockref) is None:
577 repo.ui.develwarn('write with no lock: "%s"' % path,
577 repo.ui.develwarn('write with no lock: "%s"' % path,
578 stacklevel=3)
578 stacklevel=3)
579 return ret
579 return ret
580 return checksvfs
580 return checksvfs
581
581
582 def close(self):
582 def close(self):
583 self._writecaches()
583 self._writecaches()
584
584
585 def _loadextensions(self):
585 def _loadextensions(self):
586 extensions.loadall(self.ui)
586 extensions.loadall(self.ui)
587
587
588 def _writecaches(self):
588 def _writecaches(self):
589 if self._revbranchcache:
589 if self._revbranchcache:
590 self._revbranchcache.write()
590 self._revbranchcache.write()
591
591
592 def _restrictcapabilities(self, caps):
592 def _restrictcapabilities(self, caps):
593 if self.ui.configbool('experimental', 'bundle2-advertise'):
593 if self.ui.configbool('experimental', 'bundle2-advertise'):
594 caps = set(caps)
594 caps = set(caps)
595 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
595 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
596 role='client'))
596 role='client'))
597 caps.add('bundle2=' + urlreq.quote(capsblob))
597 caps.add('bundle2=' + urlreq.quote(capsblob))
598 return caps
598 return caps
599
599
600 def _applyopenerreqs(self):
600 def _applyopenerreqs(self):
601 self.svfs.options = dict((r, 1) for r in self.requirements
601 self.svfs.options = dict((r, 1) for r in self.requirements
602 if r in self.openerreqs)
602 if r in self.openerreqs)
603 # experimental config: format.chunkcachesize
603 # experimental config: format.chunkcachesize
604 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
604 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
605 if chunkcachesize is not None:
605 if chunkcachesize is not None:
606 self.svfs.options['chunkcachesize'] = chunkcachesize
606 self.svfs.options['chunkcachesize'] = chunkcachesize
607 # experimental config: format.maxchainlen
607 # experimental config: format.maxchainlen
608 maxchainlen = self.ui.configint('format', 'maxchainlen')
608 maxchainlen = self.ui.configint('format', 'maxchainlen')
609 if maxchainlen is not None:
609 if maxchainlen is not None:
610 self.svfs.options['maxchainlen'] = maxchainlen
610 self.svfs.options['maxchainlen'] = maxchainlen
611 # experimental config: format.manifestcachesize
611 # experimental config: format.manifestcachesize
612 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
612 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
613 if manifestcachesize is not None:
613 if manifestcachesize is not None:
614 self.svfs.options['manifestcachesize'] = manifestcachesize
614 self.svfs.options['manifestcachesize'] = manifestcachesize
615 # experimental config: format.aggressivemergedeltas
615 # experimental config: format.aggressivemergedeltas
616 aggressivemergedeltas = self.ui.configbool('format',
616 aggressivemergedeltas = self.ui.configbool('format',
617 'aggressivemergedeltas')
617 'aggressivemergedeltas')
618 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
618 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
619 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
619 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
620 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
620 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
621 if 0 <= chainspan:
621 if 0 <= chainspan:
622 self.svfs.options['maxdeltachainspan'] = chainspan
622 self.svfs.options['maxdeltachainspan'] = chainspan
623 mmapindexthreshold = self.ui.configbytes('experimental',
623 mmapindexthreshold = self.ui.configbytes('experimental',
624 'mmapindexthreshold')
624 'mmapindexthreshold')
625 if mmapindexthreshold is not None:
625 if mmapindexthreshold is not None:
626 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
626 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
627 withsparseread = self.ui.configbool('experimental', 'sparse-read')
627 withsparseread = self.ui.configbool('experimental', 'sparse-read')
628 srdensitythres = float(self.ui.config('experimental',
628 srdensitythres = float(self.ui.config('experimental',
629 'sparse-read.density-threshold'))
629 'sparse-read.density-threshold'))
630 srmingapsize = self.ui.configbytes('experimental',
630 srmingapsize = self.ui.configbytes('experimental',
631 'sparse-read.min-gap-size')
631 'sparse-read.min-gap-size')
632 self.svfs.options['with-sparse-read'] = withsparseread
632 self.svfs.options['with-sparse-read'] = withsparseread
633 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
633 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
634 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
634 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
635
635
636 for r in self.requirements:
636 for r in self.requirements:
637 if r.startswith('exp-compression-'):
637 if r.startswith('exp-compression-'):
638 self.svfs.options['compengine'] = r[len('exp-compression-'):]
638 self.svfs.options['compengine'] = r[len('exp-compression-'):]
639
639
640 # TODO move "revlogv2" to openerreqs once finalized.
640 # TODO move "revlogv2" to openerreqs once finalized.
641 if REVLOGV2_REQUIREMENT in self.requirements:
641 if REVLOGV2_REQUIREMENT in self.requirements:
642 self.svfs.options['revlogv2'] = True
642 self.svfs.options['revlogv2'] = True
643
643
644 def _writerequirements(self):
644 def _writerequirements(self):
645 scmutil.writerequires(self.vfs, self.requirements)
645 scmutil.writerequires(self.vfs, self.requirements)
646
646
647 def _checknested(self, path):
647 def _checknested(self, path):
648 """Determine if path is a legal nested repository."""
648 """Determine if path is a legal nested repository."""
649 if not path.startswith(self.root):
649 if not path.startswith(self.root):
650 return False
650 return False
651 subpath = path[len(self.root) + 1:]
651 subpath = path[len(self.root) + 1:]
652 normsubpath = util.pconvert(subpath)
652 normsubpath = util.pconvert(subpath)
653
653
654 # XXX: Checking against the current working copy is wrong in
654 # XXX: Checking against the current working copy is wrong in
655 # the sense that it can reject things like
655 # the sense that it can reject things like
656 #
656 #
657 # $ hg cat -r 10 sub/x.txt
657 # $ hg cat -r 10 sub/x.txt
658 #
658 #
659 # if sub/ is no longer a subrepository in the working copy
659 # if sub/ is no longer a subrepository in the working copy
660 # parent revision.
660 # parent revision.
661 #
661 #
662 # However, it can of course also allow things that would have
662 # However, it can of course also allow things that would have
663 # been rejected before, such as the above cat command if sub/
663 # been rejected before, such as the above cat command if sub/
664 # is a subrepository now, but was a normal directory before.
664 # is a subrepository now, but was a normal directory before.
665 # The old path auditor would have rejected by mistake since it
665 # The old path auditor would have rejected by mistake since it
666 # panics when it sees sub/.hg/.
666 # panics when it sees sub/.hg/.
667 #
667 #
668 # All in all, checking against the working copy seems sensible
668 # All in all, checking against the working copy seems sensible
669 # since we want to prevent access to nested repositories on
669 # since we want to prevent access to nested repositories on
670 # the filesystem *now*.
670 # the filesystem *now*.
671 ctx = self[None]
671 ctx = self[None]
672 parts = util.splitpath(subpath)
672 parts = util.splitpath(subpath)
673 while parts:
673 while parts:
674 prefix = '/'.join(parts)
674 prefix = '/'.join(parts)
675 if prefix in ctx.substate:
675 if prefix in ctx.substate:
676 if prefix == normsubpath:
676 if prefix == normsubpath:
677 return True
677 return True
678 else:
678 else:
679 sub = ctx.sub(prefix)
679 sub = ctx.sub(prefix)
680 return sub.checknested(subpath[len(prefix) + 1:])
680 return sub.checknested(subpath[len(prefix) + 1:])
681 else:
681 else:
682 parts.pop()
682 parts.pop()
683 return False
683 return False
684
684
685 def peer(self):
685 def peer(self):
686 return localpeer(self) # not cached to avoid reference cycle
686 return localpeer(self) # not cached to avoid reference cycle
687
687
688 def unfiltered(self):
688 def unfiltered(self):
689 """Return unfiltered version of the repository
689 """Return unfiltered version of the repository
690
690
691 Intended to be overwritten by filtered repo."""
691 Intended to be overwritten by filtered repo."""
692 return self
692 return self
693
693
694 def filtered(self, name, visibilityexceptions=None):
694 def filtered(self, name, visibilityexceptions=None):
695 """Return a filtered version of a repository"""
695 """Return a filtered version of a repository"""
696 cls = repoview.newtype(self.unfiltered().__class__)
696 cls = repoview.newtype(self.unfiltered().__class__)
697 return cls(self, name, visibilityexceptions)
697 return cls(self, name, visibilityexceptions)
698
698
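A quick sketch of the two methods above, assuming `repo` is an open localrepository; 'served' is one of the standard repoview filter names and hides, among others, secret changesets:

    unfi = repo.unfiltered()             # the full, unfiltered repository
    served = repo.filtered('served')     # a filtered view of the same repo
    assert len(served) <= len(unfi)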
699 @repofilecache('bookmarks', 'bookmarks.current')
699 @repofilecache('bookmarks', 'bookmarks.current')
700 def _bookmarks(self):
700 def _bookmarks(self):
701 return bookmarks.bmstore(self)
701 return bookmarks.bmstore(self)
702
702
703 @property
703 @property
704 def _activebookmark(self):
704 def _activebookmark(self):
705 return self._bookmarks.active
705 return self._bookmarks.active
706
706
707 # _phasesets depend on changelog. what we need is to call
707 # _phasesets depend on changelog. what we need is to call
708 # _phasecache.invalidate() if '00changelog.i' was changed, but it
708 # _phasecache.invalidate() if '00changelog.i' was changed, but it
709 # can't be easily expressed in filecache mechanism.
709 # can't be easily expressed in filecache mechanism.
710 @storecache('phaseroots', '00changelog.i')
710 @storecache('phaseroots', '00changelog.i')
711 def _phasecache(self):
711 def _phasecache(self):
712 return phases.phasecache(self, self._phasedefaults)
712 return phases.phasecache(self, self._phasedefaults)
713
713
714 @storecache('obsstore')
714 @storecache('obsstore')
715 def obsstore(self):
715 def obsstore(self):
716 return obsolete.makestore(self.ui, self)
716 return obsolete.makestore(self.ui, self)
717
717
718 @storecache('00changelog.i')
718 @storecache('00changelog.i')
719 def changelog(self):
719 def changelog(self):
720 return changelog.changelog(self.svfs,
720 return changelog.changelog(self.svfs,
721 trypending=txnutil.mayhavepending(self.root))
721 trypending=txnutil.mayhavepending(self.root))
722
722
723 def _constructmanifest(self):
723 def _constructmanifest(self):
724 # This is a temporary function while we migrate from manifest to
724 # This is a temporary function while we migrate from manifest to
725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
726 # manifest creation.
726 # manifest creation.
727 return manifest.manifestrevlog(self.svfs)
727 return manifest.manifestrevlog(self.svfs)
728
728
729 @storecache('00manifest.i')
729 @storecache('00manifest.i')
730 def manifestlog(self):
730 def manifestlog(self):
731 return manifest.manifestlog(self.svfs, self)
731 return manifest.manifestlog(self.svfs, self)
732
732
733 @repofilecache('dirstate')
733 @repofilecache('dirstate')
734 def dirstate(self):
734 def dirstate(self):
735 sparsematchfn = lambda: sparse.matcher(self)
735 sparsematchfn = lambda: sparse.matcher(self)
736
736
737 return dirstate.dirstate(self.vfs, self.ui, self.root,
737 return dirstate.dirstate(self.vfs, self.ui, self.root,
738 self._dirstatevalidate, sparsematchfn)
738 self._dirstatevalidate, sparsematchfn)
739
739
740 def _dirstatevalidate(self, node):
740 def _dirstatevalidate(self, node):
741 try:
741 try:
742 self.changelog.rev(node)
742 self.changelog.rev(node)
743 return node
743 return node
744 except error.LookupError:
744 except error.LookupError:
745 if not self._dirstatevalidatewarned:
745 if not self._dirstatevalidatewarned:
746 self._dirstatevalidatewarned = True
746 self._dirstatevalidatewarned = True
747 self.ui.warn(_("warning: ignoring unknown"
747 self.ui.warn(_("warning: ignoring unknown"
748 " working parent %s!\n") % short(node))
748 " working parent %s!\n") % short(node))
749 return nullid
749 return nullid
750
750
751 @repofilecache(narrowspec.FILENAME)
751 @repofilecache(narrowspec.FILENAME)
752 def narrowpats(self):
752 def narrowpats(self):
753 """matcher patterns for this repository's narrowspec
753 """matcher patterns for this repository's narrowspec
754
754
755 A tuple of (includes, excludes).
755 A tuple of (includes, excludes).
756 """
756 """
757 source = self
757 source = self
758 if self.shared():
758 if self.shared():
759 from . import hg
759 from . import hg
760 source = hg.sharedreposource(self)
760 source = hg.sharedreposource(self)
761 return narrowspec.load(source)
761 return narrowspec.load(source)
762
762
763 @repofilecache(narrowspec.FILENAME)
763 @repofilecache(narrowspec.FILENAME)
764 def _narrowmatch(self):
764 def _narrowmatch(self):
765 if changegroup.NARROW_REQUIREMENT not in self.requirements:
765 if changegroup.NARROW_REQUIREMENT not in self.requirements:
766 return matchmod.always(self.root, '')
766 return matchmod.always(self.root, '')
767 include, exclude = self.narrowpats
767 include, exclude = self.narrowpats
768 return narrowspec.match(self.root, include=include, exclude=exclude)
768 return narrowspec.match(self.root, include=include, exclude=exclude)
769
769
770 # TODO(martinvonz): make this property-like instead?
770 # TODO(martinvonz): make this property-like instead?
771 def narrowmatch(self):
771 def narrowmatch(self):
772 return self._narrowmatch
772 return self._narrowmatch
773
773
774 def setnarrowpats(self, newincludes, newexcludes):
774 def setnarrowpats(self, newincludes, newexcludes):
775 target = self
775 target = self
776 if self.shared():
776 if self.shared():
777 from . import hg
777 from . import hg
778 target = hg.sharedreposource(self)
778 target = hg.sharedreposource(self)
779 narrowspec.save(target, newincludes, newexcludes)
779 narrowspec.save(target, newincludes, newexcludes)
780 self.invalidate(clearfilecache=True)
780 self.invalidate(clearfilecache=True)
781
781
782 def __getitem__(self, changeid):
782 def __getitem__(self, changeid):
783 if changeid is None:
783 if changeid is None:
784 return context.workingctx(self)
784 return context.workingctx(self)
785 if isinstance(changeid, context.basectx):
786 return changeid
785 if isinstance(changeid, slice):
787 if isinstance(changeid, slice):
786 # wdirrev isn't contiguous so the slice shouldn't include it
788 # wdirrev isn't contiguous so the slice shouldn't include it
787 return [context.changectx(self, i)
789 return [context.changectx(self, i)
788 for i in xrange(*changeid.indices(len(self)))
790 for i in xrange(*changeid.indices(len(self)))
789 if i not in self.changelog.filteredrevs]
791 if i not in self.changelog.filteredrevs]
790 try:
792 try:
791 return context.changectx(self, changeid)
793 return context.changectx(self, changeid)
792 except error.WdirUnsupported:
794 except error.WdirUnsupported:
793 return context.workingctx(self)
795 return context.workingctx(self)
794
796
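This hunk is the heart of the patch: reusing an already-constructed context now happens in __getitem__ rather than in basectx.__new__ (removed in the context.py hunk above). A minimal sketch, assuming `repo` is an open localrepository:

    ctx = repo['tip']
    # a basectx instance passed back in is returned unchanged, so callers
    # can cheaply normalize 'changeid or context' style arguments:
    assert repo[ctx] is ctx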
795 def __contains__(self, changeid):
797 def __contains__(self, changeid):
796 """True if the given changeid exists
798 """True if the given changeid exists
797
799
798 error.LookupError is raised if an ambiguous node is specified.
800 error.LookupError is raised if an ambiguous node is specified.
799 """
801 """
800 try:
802 try:
801 self[changeid]
803 self[changeid]
802 return True
804 return True
803 except error.RepoLookupError:
805 except error.RepoLookupError:
804 return False
806 return False
805
807
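A hedged example of the membership test, under the same assumption about `repo`; per the docstring, an ambiguous node prefix raises rather than returning False:

    if 'tip' in repo:
        ctx = repo['tip']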
806 def __nonzero__(self):
808 def __nonzero__(self):
807 return True
809 return True
808
810
809 __bool__ = __nonzero__
811 __bool__ = __nonzero__
810
812
811 def __len__(self):
813 def __len__(self):
812 # no need to pay the cost of repoview.changelog
814 # no need to pay the cost of repoview.changelog
813 unfi = self.unfiltered()
815 unfi = self.unfiltered()
814 return len(unfi.changelog)
816 return len(unfi.changelog)
815
817
816 def __iter__(self):
818 def __iter__(self):
817 return iter(self.changelog)
819 return iter(self.changelog)
818
820
819 def revs(self, expr, *args):
821 def revs(self, expr, *args):
820 '''Find revisions matching a revset.
822 '''Find revisions matching a revset.
821
823
822 The revset is specified as a string ``expr`` that may contain
824 The revset is specified as a string ``expr`` that may contain
823 %-formatting to escape certain types. See ``revsetlang.formatspec``.
825 %-formatting to escape certain types. See ``revsetlang.formatspec``.
824
826
825 Revset aliases from the configuration are not expanded. To expand
827 Revset aliases from the configuration are not expanded. To expand
826 user aliases, consider calling ``scmutil.revrange()`` or
828 user aliases, consider calling ``scmutil.revrange()`` or
827 ``repo.anyrevs([expr], user=True)``.
829 ``repo.anyrevs([expr], user=True)``.
828
830
829 Returns a revset.abstractsmartset, which is a list-like interface
831 Returns a revset.abstractsmartset, which is a list-like interface
830 that contains integer revisions.
832 that contains integer revisions.
831 '''
833 '''
832 expr = revsetlang.formatspec(expr, *args)
834 expr = revsetlang.formatspec(expr, *args)
833 m = revset.match(None, expr)
835 m = revset.match(None, expr)
834 return m(self)
836 return m(self)
835
837
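A sketch of the %-formatting mentioned above, assuming `repo` is open; %d escapes a single integer revision (%ld would take a list of them):

    drafts = repo.revs('draft() and ancestors(%d)', repo['.'].rev())
    revlist = list(drafts)    # integer revision numbers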
836 def set(self, expr, *args):
838 def set(self, expr, *args):
837 '''Find revisions matching a revset and emit changectx instances.
839 '''Find revisions matching a revset and emit changectx instances.
838
840
839 This is a convenience wrapper around ``revs()`` that iterates the
841 This is a convenience wrapper around ``revs()`` that iterates the
840 result and is a generator of changectx instances.
842 result and is a generator of changectx instances.
841
843
842 Revset aliases from the configuration are not expanded. To expand
844 Revset aliases from the configuration are not expanded. To expand
843 user aliases, consider calling ``scmutil.revrange()``.
845 user aliases, consider calling ``scmutil.revrange()``.
844 '''
846 '''
845 for r in self.revs(expr, *args):
847 for r in self.revs(expr, *args):
846 yield self[r]
848 yield self[r]
847
849
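The generator counterpart, under the same assumption:

    for ctx in repo.set('branch(%s) and head()', 'default'):
        name = ctx.branch()   # each item is a changectx instance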
848 def anyrevs(self, specs, user=False, localalias=None):
850 def anyrevs(self, specs, user=False, localalias=None):
849 '''Find revisions matching one of the given revsets.
851 '''Find revisions matching one of the given revsets.
850
852
851 Revset aliases from the configuration are not expanded by default. To
853 Revset aliases from the configuration are not expanded by default. To
852 expand user aliases, specify ``user=True``. To provide some local
854 expand user aliases, specify ``user=True``. To provide some local
853 definitions overriding user aliases, set ``localalias`` to
855 definitions overriding user aliases, set ``localalias`` to
854 ``{name: definitionstring}``.
856 ``{name: definitionstring}``.
855 '''
857 '''
856 if user:
858 if user:
857 m = revset.matchany(self.ui, specs, repo=self,
859 m = revset.matchany(self.ui, specs, repo=self,
858 localalias=localalias)
860 localalias=localalias)
859 else:
861 else:
860 m = revset.matchany(None, specs, localalias=localalias)
862 m = revset.matchany(None, specs, localalias=localalias)
861 return m(self)
863 return m(self)
862
864
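A sketch of the alias handling; 'mine' is a hypothetical alias name used purely for illustration:

    revs = repo.anyrevs(['mine', 'tip'], user=True,
                        localalias={'mine': 'draft() and user("alice")'})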
863 def url(self):
865 def url(self):
864 return 'file:' + self.root
866 return 'file:' + self.root
865
867
866 def hook(self, name, throw=False, **args):
868 def hook(self, name, throw=False, **args):
867 """Call a hook, passing this repo instance.
869 """Call a hook, passing this repo instance.
868
870
869 This is a convenience method to aid invoking hooks. Extensions likely
871 This is a convenience method to aid invoking hooks. Extensions likely
870 won't call this unless they have registered a custom hook or are
872 won't call this unless they have registered a custom hook or are
871 replacing code that is expected to call a hook.
873 replacing code that is expected to call a hook.
872 """
874 """
873 return hook.hook(self.ui, self, name, throw, **args)
875 return hook.hook(self.ui, self, name, throw, **args)
874
876
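A hedged sketch; 'myhook' is a hypothetical hook name, and for shell hooks the extra keyword arguments surface as HG_* environment variables:

    from mercurial.node import hex
    repo.hook('myhook', throw=False, node=hex(repo['tip'].node()))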
875 @filteredpropertycache
877 @filteredpropertycache
876 def _tagscache(self):
878 def _tagscache(self):
877 '''Returns a tagscache object that contains various tags related
879 '''Returns a tagscache object that contains various tags related
878 caches.'''
880 caches.'''
879
881
880 # This simplifies its cache management by having one decorated
882 # This simplifies its cache management by having one decorated
881 # function (this one) and the rest simply fetch things from it.
883 # function (this one) and the rest simply fetch things from it.
882 class tagscache(object):
884 class tagscache(object):
883 def __init__(self):
885 def __init__(self):
884 # These two define the set of tags for this repository. tags
886 # These two define the set of tags for this repository. tags
885 # maps tag name to node; tagtypes maps tag name to 'global' or
887 # maps tag name to node; tagtypes maps tag name to 'global' or
886 # 'local'. (Global tags are defined by .hgtags across all
888 # 'local'. (Global tags are defined by .hgtags across all
887 # heads, and local tags are defined in .hg/localtags.)
889 # heads, and local tags are defined in .hg/localtags.)
888 # They constitute the in-memory cache of tags.
890 # They constitute the in-memory cache of tags.
889 self.tags = self.tagtypes = None
891 self.tags = self.tagtypes = None
890
892
891 self.nodetagscache = self.tagslist = None
893 self.nodetagscache = self.tagslist = None
892
894
893 cache = tagscache()
895 cache = tagscache()
894 cache.tags, cache.tagtypes = self._findtags()
896 cache.tags, cache.tagtypes = self._findtags()
895
897
896 return cache
898 return cache
897
899
898 def tags(self):
900 def tags(self):
899 '''return a mapping of tag to node'''
901 '''return a mapping of tag to node'''
900 t = {}
902 t = {}
901 if self.changelog.filteredrevs:
903 if self.changelog.filteredrevs:
902 tags, tt = self._findtags()
904 tags, tt = self._findtags()
903 else:
905 else:
904 tags = self._tagscache.tags
906 tags = self._tagscache.tags
905 for k, v in tags.iteritems():
907 for k, v in tags.iteritems():
906 try:
908 try:
907 # ignore tags to unknown nodes
909 # ignore tags to unknown nodes
908 self.changelog.rev(v)
910 self.changelog.rev(v)
909 t[k] = v
911 t[k] = v
910 except (error.LookupError, ValueError):
912 except (error.LookupError, ValueError):
911 pass
913 pass
912 return t
914 return t
913
915
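Assuming `repo` is open, the mapping returned above always carries 'tip':

    t = repo.tags()                      # tag name -> binary node
    assert t['tip'] == repo.changelog.tip()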
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

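    # An illustrative sketch of the extension hook-point described in the
    # docstring above: an extension could wrap _findtags to inject a
    # "virtual" tag. The wrapper and tag name below are hypothetical;
    # extensions.wrapfunction is the standard wrapping helper:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _findtags(orig, self):
    #       tags, tagtypes = orig(self)
    #       tags['current-tip'] = self.changelog.tip()
    #       tagtypes['current-tip'] = 'global'
    #       return tags, tagtypes
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo.localrepository, '_findtags',
    #                               _findtags)
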
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

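    # Usage sketch: callers that merely probe for a branch can pass
    # ignoremissing=True and test for None instead of catching the error:
    #
    #   node = repo.branchtip('default', ignoremissing=True)
    #   if node is None:
    #       pass  # the branch does not exist in this repository
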
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

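    # Config sketch: the [encode]/[decode] hgrc sections feed the filter
    # machinery above. The example is adapted from Mercurial's configuration
    # help; treat it as illustrative:
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip
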
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
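        # Config sketch (illustrative): because these hooks run with
        # throw=True, any of them can veto the whole transaction, e.g. a
        # shell hook that rejects everything except commits:
        #
        #   [hooks]
        #   pretxnclose.commitonly = test "$HG_TXNNAME" = "commit"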
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback(),
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

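    # Usage sketch: this is the method behind "hg recover"; from code the
    # call is simply (illustrative):
    #
    #   if repo.recover():
    #       pass  # journal rolled back, caches already invalidated
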
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

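    # Usage sketch: "hg rollback" drives this method; the dry-run form only
    # reports what would be undone (illustrative):
    #
    #   repo.rollback(dryrun=True)
    #   repo.rollback(force=True)  # skip the "may lose data" safety check
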
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

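    # An illustrative sketch of the extension point mentioned in the
    # docstring above (the extra cache warmer is hypothetical;
    # extensions.wrapfunction is the standard wrapping helper):
    #
    #   def _buildcacheupdater(orig, self, newtransaction):
    #       updater = orig(self, newtransaction)
    #       def wrapped(tr):
    #           updater(tr)
    #           warmmycache(self, tr)  # hypothetical extra cache
    #       return wrapped
    #
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', _buildcacheupdater)
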
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

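    # Usage sketch: "hg debugupdatecaches" exercises the 'full' path; the
    # equivalent call from code would be (illustrative):
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
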
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

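    # Usage sketch: txnclosehook above defers its hooks through this
    # mechanism; a caller-side example would be (illustrative):
    #
    #   def notify():
    #       pass  # runs once the outermost lock is released
    #
    #   repo._afterlock(notify)
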
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

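    # Ordering sketch: callers that need both locks plus a transaction take
    # them outermost-first, e.g. (illustrative):
    #
    #   with repo.wlock():       # always before 'lock'
    #       with repo.lock():
    #           with repo.transaction('my-operation') as tr:
    #               pass         # mutate the store here
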
1690 def _wlockchecktransaction(self):
1692 def _wlockchecktransaction(self):
1691 if self.currenttransaction() is not None:
1693 if self.currenttransaction() is not None:
1692 raise error.LockInheritanceContractViolation(
1694 raise error.LockInheritanceContractViolation(
1693 'wlock cannot be inherited in the middle of a transaction')
1695 'wlock cannot be inherited in the middle of a transaction')
1694
1696
1695 def wlock(self, wait=True):
1697 def wlock(self, wait=True):
1696 '''Lock the non-store parts of the repository (everything under
1698 '''Lock the non-store parts of the repository (everything under
1697 .hg except .hg/store) and return a weak reference to the lock.
1699 .hg except .hg/store) and return a weak reference to the lock.
1698
1700
1699 Use this before modifying files in .hg.
1701 Use this before modifying files in .hg.
1700
1702
1701 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1703 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1702 'wlock' first to avoid a dead-lock hazard.'''
1704 'wlock' first to avoid a dead-lock hazard.'''
1703 l = self._wlockref and self._wlockref()
1705 l = self._wlockref and self._wlockref()
1704 if l is not None and l.held:
1706 if l is not None and l.held:
1705 l.lock()
1707 l.lock()
1706 return l
1708 return l
1707
1709
1708 # We do not need to check for non-waiting lock acquisition. Such
1710 # We do not need to check for non-waiting lock acquisition. Such
1709 # acquisition would not cause dead-lock as they would just fail.
1711 # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

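    # Illustrative note (added for clarity, not in the original file): for a
    # rename committed above, the filelog metadata would look roughly like
    #
    #     meta = {"copy": "foo", "copyrev": "<40-digit hex filenode>"}
    #
    # with fparent1 set to nullid so that readers know to consult the copy
    # data instead of the first parent.
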
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
1856 """Add a new revision to current repository.
1858 """Add a new revision to current repository.
1857
1859
1858 Revision information is gathered from the working directory,
1860 Revision information is gathered from the working directory,
1859 match can be used to filter the committed files. If editor is
1861 match can be used to filter the committed files. If editor is
1860 supplied, it is called to get a commit message.
1862 supplied, it is called to get a commit message.
1861 """
1863 """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

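    # Illustrative usage (a sketch, not part of the original file; the
    # message and user are made up):
    #
    #     node = repo.commit(text='fix the frobnicator', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
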
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

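    # Illustrative usage (a sketch, not part of the original file; p1node,
    # p2node and the file contents are hypothetical): commitctx is also the
    # entry point for in-memory commits built with context.memctx:
    #
    #     def getfctx(repo, memctx, path):
    #         return context.memfilectx(repo, memctx, path, 'data\n')
    #
    #     mctx = context.memctx(repo, (p1node, p2node), 'message',
    #                           ['a.txt'], getfctx, user='alice')
    #     node = repo.commitctx(mctx)
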
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

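    # Illustrative callback (a sketch, not part of the original file):
    #
    #     def fixup(wctx, status):
    #         # runs under wlock once status fixups are complete
    #         wctx.repo().ui.debug('%d modified files\n'
    #                              % len(status.modified))
    #
    #     repo.addpostdsstatus(fixup)
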
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

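    # Illustrative usage (a sketch, not part of the original file; short()
    # comes from the node module):
    #
    #     for node in repo.branchheads('default', closed=True):
    #         repo.ui.write('%s\n' % short(node))
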
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

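    # Descriptive note (added for clarity, not in the original file): for each
    # (top, bottom) pair, between() samples the ancestry of 'top' at
    # exponentially growing distances (1, 2, 4, 8, ...), so the returned list
    # covers the path toward 'bottom' with O(log n) nodes.
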
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        carrying repo, remote, and outgoing attributes, before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

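    # Illustrative usage (a sketch, not part of the original file; 'newnode'
    # is hypothetical): pushing a bookmark through the pushkey protocol might
    # look like
    #
    #     ok = repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
    #
    # where the empty old value asks for the key to be created.
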
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
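
# Descriptive note (added for clarity, not in the original file): undoname
# maps a journal file to its undo counterpart, e.g. '.hg/store/journal'
# becomes '.hg/store/undo'.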

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
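
# Illustrative extension hook (a sketch, not part of the original file;
# 'myrequirement' is a hypothetical name):
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         requirements = orig(repo)
#         requirements.add('myrequirement')
#         return requirements
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)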