context: reduce dependence of changectx constructor...
Martin von Zweigbergk
r39993:e1e3d1b4 default
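
The diff below systematically replaces direct construction of changectx objects with repository item lookup. As a minimal sketch of the idiom (the repo and rev names here are placeholders, not code from the change itself; in this codebase localrepository.__getitem__ returns a changectx for a committed revision):

    # Hypothetical illustration of the pattern this commit applies.
    # Before: the call site hard-wires the changectx class.
    ctx = changectx(repo, rev)
    # After: the repository resolves the revision to a context itself,
    # so the call site no longer depends on the changectx constructor.
    ctx = repo[rev]

Routing lookups through repo[...] keeps revision-to-context resolution in one place instead of spread across call sites.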
@@ -1,2497 +1,2497 b''
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirid,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
-        return changectx(self._repo, nullrev)
+        return self._repo[nullrev]

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        narrowmatch = self._repo.narrowmatch()
        if not narrowmatch.always():
            for l in r:
                l[:] = list(filter(narrowmatch, l))
        for l in r:
            l.sort()

        return r

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            elif changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            elif changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            elif (changeid == '.'
                  or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            elif len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (repo.local()
                        and changeid in repo.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message

            elif len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    pass
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
-            return [changectx(repo, p1)]
-        return [changectx(repo, p1), changectx(repo, p2)]
+            return [repo[p1]]
+        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
-        return [changectx(self._repo, x) for x in c]
+        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
-            yield changectx(self._repo, a)
+            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
-            yield changectx(self._repo, d)
+            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
-        return changectx(self._repo, anc)
+        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out in the previous
            # list comprehension, inserting at index 0 will always result in
            # "replacing the first nullid parent with rename information".
884 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
884 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
885
885
886 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
886 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
887
887
888 def p1(self):
888 def p1(self):
889 return self.parents()[0]
889 return self.parents()[0]
890
890
891 def p2(self):
891 def p2(self):
892 p = self.parents()
892 p = self.parents()
893 if len(p) == 2:
893 if len(p) == 2:
894 return p[1]
894 return p[1]
895 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
895 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
896
896
897 def annotate(self, follow=False, skiprevs=None, diffopts=None):
897 def annotate(self, follow=False, skiprevs=None, diffopts=None):
898 """Returns a list of annotateline objects for each line in the file
898 """Returns a list of annotateline objects for each line in the file
899
899
900 - line.fctx is the filectx of the node where that line was last changed
900 - line.fctx is the filectx of the node where that line was last changed
901 - line.lineno is the line number at the first appearance in the managed
901 - line.lineno is the line number at the first appearance in the managed
902 file
902 file
903 - line.text is the data on that line (including newline character)
903 - line.text is the data on that line (including newline character)
904 """
904 """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if r'_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
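        # Note: visit is keyed by (linkrev, filenode), so max(visit) pops the
        # candidate with the highest linkrev first; ancestors are therefore
        # yielded in roughly newest-to-oldest order.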

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
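
    # Sketch of the distinction (illustrative; assumes a decode filter, e.g.
    # one configured via the [decode]/[encode] hgrc sections, is active):
    # data() returns the repository-internal bytes, while decodeddata()
    # returns the bytes as they would be written to the working directory,
    # for instance with '\n' expanded to '\r\n' by an EOL-conversion filter.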

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
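
    # Construction sketch (file name is hypothetical): any one of changeid,
    # fileid or changectx is sufficient; the remaining attributes are derived
    # lazily on first access:
    #
    #   fctx = filectx(repo, 'mercurial/context.py', changeid='tip')
    #   fctx2 = fctx.filectx(fctx.filenode())   # same file, via its fileid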

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build a `changectx` for a filtered revision. In such a case we
            # fall back to creating a `changectx` on the unfiltered version
            # of the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious problems with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solutions to the linkrev issues are on the
            # table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))
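
    # Config sketch (illustrative): with the following in an hgrc, reading a
    # censored file revision returns '' instead of aborting:
    #
    #   [censor]
    #   policy = ignore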

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func
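
    # Three-way flag resolution sketch (hypothetical flag values): with
    # fl1 = 'x', fl2 = '', fla = '', only p1 changed the flag, so 'x' wins;
    # with fl1 = 'x', fl2 = 'l', fla = '', the two sides changed it in
    # different ways, so '' is returned ("punt for conflicts").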

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates the backing stores that this working
        context wraps, to reflect the fact that the changes represented by
        this workingctx have been committed. For example, it marks modified
        and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [self._repo[x] for x in p]
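
    # Note: the dirstate always records two parent nodes; when the second is
    # nullid the working directory effectively has a single parent, so the
    # slice above drops it and only one changectx is returned.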

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
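                # The warning below estimates peak memory as roughly three
                # times the file size (presumably to account for several
                # in-memory copies during revlog processing) and converts
                # bytes to MB by integer-dividing by 1,000,000.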
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # Did the file become inaccessible in the meantime? Mark it
                # as deleted, matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by the
        manifest merge to see that files are different, and by the update
        logic to avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent: in that case the
        dirstate status computed below is sufficient and building a new
        manifest can be skipped. Only when self (the working directory) is
        compared against something other than its parent (repo['.']) do we
        fall back to the manifest-based comparison.
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and `date` is non-None. If
    `exists` is `False`, the file was deleted.
    """
1773
1773
1774 def __init__(self, repo):
1774 def __init__(self, repo):
1775 super(overlayworkingctx, self).__init__(repo)
1775 super(overlayworkingctx, self).__init__(repo)
1776 self.clean()
1776 self.clean()
1777
1777
1778 def setbase(self, wrappedctx):
1778 def setbase(self, wrappedctx):
1779 self._wrappedctx = wrappedctx
1779 self._wrappedctx = wrappedctx
1780 self._parents = [wrappedctx]
1780 self._parents = [wrappedctx]
1781 # Drop old manifest cache as it is now out of date.
1781 # Drop old manifest cache as it is now out of date.
1782 # This is necessary when, e.g., rebasing several nodes with one
1782 # This is necessary when, e.g., rebasing several nodes with one
1783 # ``overlayworkingctx`` (e.g. with --collapse).
1783 # ``overlayworkingctx`` (e.g. with --collapse).
1784 util.clearcachedproperty(self, '_manifest')
1784 util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]
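
    # Reading aid for the three buckets above: a cached path that exists both
    # here and in the wrapped parent is "modified"; one that exists only here
    # is "added"; one marked exists=False that the parent does track is
    # "removed".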

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             self._path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
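        # Illustration (invented paths): if p1 tracks a file ``a``, a write
        # arriving for ``a/foo`` must abort; conversely, a write for ``a``
        # must abort when p1 tracks ``a/foo``. Both directions are checked
        # below.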
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1() and self._cache[component]['exists']:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if self._cache[m]['exists']]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)
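
    # Reading aid (invented example): for a cached symlink 'ln' whose data is
    # 'target', lexists('ln') is True regardless of the target, while
    # exists('ln') follows the link and is only True if 'target' itself exists.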

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             self._path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                        underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # If no data was provided, see if we already have some; if not, grab
        # it from our underlying context, so that we always have data if the
        # file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
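
# A usage sketch (illustrative; ``filectxfn`` is any callable with the
# (repo, memctx, path) signature described above):
#
#     cachedfn = makecachingfilectxfn(filectxfn)
#     fctx1 = cachedfn(repo, mctx, 'a.txt')  # computed by filectxfn
#     fctx2 = cachedfn(repo, mctx, 'a.txt')  # served from the path cache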

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
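
# Note: both helpers above are adapters. memctx.__init__ (below) applies them
# so that a plain context or a patch.filestore can be passed where a filectxfn
# callable is expected.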

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """
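
    # A minimal usage sketch (illustrative; assumes an existing ``repo`` and
    # invents the file name and contents):
    #
    #     def filectxfn(repo, memctx, path):
    #         return memfilectx(repo, memctx, path, b'contents of %s' % path)
    #
    #     ctx = memctx(repo, [repo['.'].node(), None], b'log message',
    #                  [b'a.txt'], filectxfn)
    #     node = ctx.commit()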

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized with a list whose length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
    """Like memctx, but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revision identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """
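
    # Sketch of intended use (illustrative; assumes an existing ``repo``):
    # rewrite only the metadata of the working directory parent, reusing its
    # manifest unchanged:
    #
    #     ctx = metadataonlyctx(repo, repo['.'], text=b'reworded message')
    #     ctx.commit()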
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized with a list whose length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

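# Usage sketch (illustrative; mirrors how localrepository uses these
# descriptors): the cached property is invalidated when the backing file
# changes on disk, e.g.:
#
#     class localrepository(object):
#         @repofilecache('bookmarks')
#         def _bookmarks(self):
#             return bookmarks.bmstore(self)
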
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

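# Illustrative sketch (not part of the original module): driving a command
# executor obtained from a peer. 'peer' is assumed to be any object exposing
# commandexecutor(), such as the localpeer class defined below. Because the
# local executor resolves futures eagerly, result() returns immediately.
def _examplelistheads(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand('heads', {})
    # Exiting the context manager calls close(); for the local case the
    # future is already resolved at this point.
    return f.result()
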
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

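# Illustrative sketch (not part of the original module): constructing a local
# peer directly from an already-opened repository object. 'repo' is assumed
# to be a localrepository instance; note that the peer wraps the 'served'
# filtered view of the repository, not the repo object itself.
def _examplepeerinfo(repo):
    peer = localpeer(repo)
    return peer.capabilities(), peer.local()
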
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

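# Illustrative sketch (not part of the original module): the registration
# pattern an extension typically uses. 'myfeature' is a hypothetical
# requirement name; gathersupportedrequirements() below only calls entries
# whose defining module is loaded for the current ui.
#
#     def featuresetup(ui, supported):
#         supported.add(b'myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
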
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn(ui=ui,
                 intents=intents,
                 requirements=requirements,
                 features=features,
                 wdirvfs=wdirvfs,
                 hgvfs=hgvfs,
                 store=store,
                 storevfs=storevfs,
                 storeoptions=storevfs.options,
                 cachevfs=cachevfs,
                 extensionmodulenames=extensionmodulenames,
                 extrastate=extrastate,
                 baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

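# Illustrative sketch (not part of the original module): how an extension
# might wrap one of the factory functions named in REPO_INTERFACES to mix an
# extra base class into the derived repository type. 'myextrastorage' is a
# hypothetical class; the wrapping uses the standard
# extensions.wrapfunction() mechanism.
#
#     def wrapfilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements, features, **kwargs)
#         return type('wrappedfilestorage', (myextrastorage, cls), {})
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage',
#                                 wrapfilestorage)
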
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

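# Illustrative note (not part of the original module): for a repository whose
# .hg/requires lists 'lfs' and whose config does not already mention the
# extension, the loop above behaves as if the user had configured
#
#     [extensions]
#     lfs =
#
# i.e. the extension is enabled automatically for that repository only.
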
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

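# Illustrative sketch (not part of the original module) of the validation
# above; 'futurefeature' stands in for any requirement this code does not
# recognize.
#
#     supported = {b'revlogv1', b'store'}
#     ensurerequirementsrecognized({b'revlogv1'}, supported)
#     # -> no error
#     ensurerequirementsrecognized({b'futurefeature'}, supported)
#     # -> raises error.RequirementError with a MissingRequirement hint
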
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

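# Illustrative sketch (not part of the original module): an extension adding
# an extra compatibility check via the monkeypatching hook described in the
# docstring above. 'myfeature' and the 'myext.enabled' config knob are
# hypothetical.
#
#     def wrapcompatible(orig, ui, requirements):
#         orig(ui, requirements)
#         if b'myfeature' in requirements and not ui.configbool(
#                 b'myext', b'enabled'):
#             raise error.RepoError(_(b'myfeature requires myext.enabled'))
#
#     extensions.wrapfunction(localrepo, 'ensurerequirementscompatible',
#                             wrapcompatible)
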
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

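# Illustrative summary (not part of the original module) of the
# requirement-to-store mapping implemented above:
#
#     requirements present              store class
#     --------------------------------  ---------------------
#     store + fncache (+ dotencode)     storemod.fncachestore
#     store only                        storemod.encodedstore
#     neither (very old repositories)   storemod.basicstore
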
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

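# Illustrative sketch (not part of the original module): for a repository
# whose requirements include revlogv1, generaldelta and sparserevlog, the
# resolved options would contain, among others:
#
#     {b'revlogv1': True, b'generaldelta': True, b'sparse-revlog': True,
#      b'with-sparse-read': ..., b'lazydeltabase': ..., ...}
#
# with the elided values depending on the user's configuration.
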
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, makemain),
    (repository.ilocalrepositoryfilestorage, makefilestorage),
]

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
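
    # Illustrative sketch (not part of the original module): per the note
    # above, an extension that maintains its own lock-free state file under
    # .hg/ would extend this set, e.g. from its reposetup():
    #
    #     localrepo.localrepository._wlockfreeprefix.add('myext.state')
    #
    # where 'myext.state' is a hypothetical file name.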

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
945
945
946 color.setup(self.ui)
946 color.setup(self.ui)
947
947
948 self.spath = self.store.path
948 self.spath = self.store.path
949 self.svfs = self.store.vfs
949 self.svfs = self.store.vfs
950 self.sjoin = self.store.join
950 self.sjoin = self.store.join
951 if (self.ui.configbool('devel', 'all-warnings') or
951 if (self.ui.configbool('devel', 'all-warnings') or
952 self.ui.configbool('devel', 'check-locks')):
952 self.ui.configbool('devel', 'check-locks')):
953 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
953 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
954 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
954 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
955 else: # standard vfs
955 else: # standard vfs
956 self.svfs.audit = self._getsvfsward(self.svfs.audit)
956 self.svfs.audit = self._getsvfsward(self.svfs.audit)
957
957
958 self._dirstatevalidatewarned = False
958 self._dirstatevalidatewarned = False
959
959
960 self._branchcaches = {}
960 self._branchcaches = {}
961 self._revbranchcache = None
961 self._revbranchcache = None
962 self._filterpats = {}
962 self._filterpats = {}
963 self._datafilters = {}
963 self._datafilters = {}
964 self._transref = self._lockref = self._wlockref = None
964 self._transref = self._lockref = self._wlockref = None
965
965
966 # A cache for various files under .hg/ that tracks file changes,
966 # A cache for various files under .hg/ that tracks file changes,
967 # (used by the filecache decorator)
967 # (used by the filecache decorator)
968 #
968 #
969 # Maps a property name to its util.filecacheentry
969 # Maps a property name to its util.filecacheentry
970 self._filecache = {}
970 self._filecache = {}
971
971
972 # hold sets of revision to be filtered
972 # hold sets of revision to be filtered
973 # should be cleared when something might have changed the filter value:
973 # should be cleared when something might have changed the filter value:
974 # - new changesets,
974 # - new changesets,
975 # - phase change,
975 # - phase change,
976 # - new obsolescence marker,
976 # - new obsolescence marker,
977 # - working directory parent change,
977 # - working directory parent change,
978 # - bookmark changes
978 # - bookmark changes
979 self.filteredrevcache = {}
979 self.filteredrevcache = {}
980
980
981 # post-dirstate-status hooks
981 # post-dirstate-status hooks
982 self._postdsstatus = []
982 self._postdsstatus = []
983
983
984 # generic mapping between names and nodes
984 # generic mapping between names and nodes
985 self.names = namespaces.namespaces()
985 self.names = namespaces.namespaces()
986
986
987 # Key to signature value.
987 # Key to signature value.
988 self._sparsesignaturecache = {}
988 self._sparsesignaturecache = {}
989 # Signature to cached matcher instance.
989 # Signature to cached matcher instance.
990 self._sparsematchercache = {}
990 self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                    or not util.safehasattr(repo, '_wlockref')
                    or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

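    # A hedged sketch of the ward pattern above (illustrative, not part of
    # the original module): the ward wraps an existing audit callable and
    # warns before delegating. With the devel checks enabled in hgrc:
    #
    #   [devel]
    #   check-locks = yes
    #
    # a write access through the warded vfs without the matching lock held
    # triggers a develwarn of the form: write with no wlock: "<path>"
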
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
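
    # Illustrative walk of the loop above (hypothetical paths): for subpath
    # 'a/b/c' the candidate prefixes are tried deepest-first ('a/b/c', then
    # 'a/b', then 'a'); the first prefix found in ctx.substate either matches
    # exactly (a legal subrepo) or delegates the remainder ('b/c') to that
    # subrepo's own checknested().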

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
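
    # Usage sketch (hedged): 'visible' is a standard repoview filter name,
    # also used elsewhere in this module (see cancopy() below):
    #
    #   visible = repo.filtered('visible')  # view hiding filtered changesets
    #   assert visible.unfiltered() is repo.unfiltered()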

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
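
    # Hedged example of narrowspec patterns (the 'path:' syntax is an
    # assumption based on narrow matcher conventions, not this changeset):
    #
    #   include = ['path:src', 'path:docs']
    #   exclude = ['path:src/vendor']
    #
    # would produce a matcher accepting 'src/a.c' but not 'src/vendor/x.c'.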

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
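
    # Indexing sketch for the method above (identifiers are standard):
    #
    #   repo[None]    # workingctx for the working directory
    #   repo['tip']   # changectx looked up by changeid
    #   repo[0:3]     # list of changectxs, skipping filtered revisions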

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
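
    # Hedged example of the %-formatting described above (revset syntax is
    # standard; the arguments are hypothetical):
    #
    #   repo.revs('heads(%ld)', [1, 2, 3])  # %ld escapes a list of revisions
    #   repo.revs('branch(%s)', 'default')  # %s escapes a string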

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
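
    # Hedged sketch of the localalias override described above (the alias
    # name and definition are hypothetical):
    #
    #   repo.anyrevs(['recent'], user=True,
    #                localalias={'recent': 'last(all(), 5)'})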

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
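
    # Grounded example (this call shape appears in transaction() below);
    # keyword arguments are exposed to shell hooks as HG_* variables:
    #
    #   repo.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)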

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
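
    # Usage sketch (hedged; the branch names are hypothetical):
    #
    #   node = repo.branchtip('default')                   # raises if absent
    #   node = repo.branchtip('gone', ignoremissing=True)  # returns None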

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
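
    # Hedged hgrc sketch for the filter loading above ([encode]/[decode] are
    # the standard section names; the command itself is illustrative):
    #
    #   [encode]
    #   *.txt = pipe: dos2unix
    #
    # maps a file pattern to a filter command. If the command starts with a
    # name registered via adddatafilter() (below), that filter function is
    # used and the rest of the line becomes its params; otherwise the command
    # is run through procutil.filter as a shell filter.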

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
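
    # Flag semantics in the method above: 'l' writes ``data`` as a symlink
    # target, 'x' sets the executable bit. A hedged call sketch:
    #
    #   repo.wwrite('bin/run.sh', b'#!/bin/sh\n', 'x')  # executable file
    #   repo.wwrite('alias', b'target/path', 'l')       # symlink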

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
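
    # Common caller pattern (hedged sketch; the transaction name is
    # hypothetical, and transaction() below requires the repo lock):
    #
    #   tr = repo.currenttransaction()
    #   if tr is None:
    #       tr = repo.transaction('my-operation')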

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag was touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) isn't
                # invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
1806
1806
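    # Illustrative sketch (not part of the original source): the txnclose
    # hooks wired up above are what a repository's hgrc can subscribe to.
    # The HG_* variable names below are assumptions derived from the
    # txnname keyword and the preparehookargs() calls; consult the hgrc
    # hooks documentation for the authoritative list.
    #
    #   [hooks]
    #   txnclose = echo "transaction $HG_TXNNAME closed"
    #   txnclose-bookmark = echo "bookmark $HG_BOOKMARK: $HG_OLDNODE -> $HG_NODE"
    #   txnclose-phase = echo "phase of $HG_NODE: $HG_OLDPHASE -> $HG_PHASE"
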
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

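    # Illustrative sketch (an assumption, not part of the original source):
    # recover() is the method behind `hg recover`; driving it directly looks
    # like the following, with a hypothetical repository path:
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #   if not repo.recover():
    #       repo.ui.status(b'nothing to recover\n')
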
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

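    # Illustrative sketch (assumption): mirroring `hg rollback --dry-run`,
    # a dry run only prints what would be undone:
    #
    #   repo.rollback(dryrun=True)   # returns 0 after printing the message
    #   repo.rollback(force=True)    # skip the "may lose data" safety check
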
    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback that updates caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

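    # Illustrative sketch (assumption): a full warm-up outside of any
    # transaction, essentially what `hg debugupdatecaches` performs:
    #
    #   repo.updatecaches(full=True)
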
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

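    # Illustrative sketch (assumption): the acquisition order the docstrings
    # above prescribe, written with context managers -- 'wlock' strictly
    # before 'lock', released in the reverse order:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction(b'example') as tr:
    #               pass  # mutate the store while both locks are held
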
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

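    # Illustrative sketch (assumption): commit() acquires its own wlock/lock,
    # so a caller only supplies metadata; None comes back when there is
    # nothing to commit and empty commits are disallowed:
    #
    #   node = repo.commit(text=b'example message',
    #                      user=b'alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status(b'nothing changed\n')
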
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

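    # Illustrative sketch (an assumption based on mercurial.context APIs):
    # in-memory commits reach the store through commitctx(), for example via
    # memctx; the file name and contents below are hypothetical:
    #
    #   from mercurial import context
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, memctx, path, b'new contents\n')
    #
    #   mctx = context.memctx(repo, (repo[b'.'].node(), None),
    #                         b'example message', [b'a.txt'], getfilectx,
    #                         user=b'alice <alice@example.com>')
    #   node = repo.commitctx(mctx)
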
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

2567 def addpostdsstatus(self, ps):
2567 def addpostdsstatus(self, ps):
2568 """Add a callback to run within the wlock, at the point at which status
2568 """Add a callback to run within the wlock, at the point at which status
2569 fixups happen.
2569 fixups happen.
2570
2570
2571 On status completion, callback(wctx, status) will be called with the
2571 On status completion, callback(wctx, status) will be called with the
2572 wlock held, unless the dirstate has changed from underneath or the wlock
2572 wlock held, unless the dirstate has changed from underneath or the wlock
2573 couldn't be grabbed.
2573 couldn't be grabbed.
2574
2574
2575 Callbacks should not capture and use a cached copy of the dirstate --
2575 Callbacks should not capture and use a cached copy of the dirstate --
2576 it might change in the meanwhile. Instead, they should access the
2576 it might change in the meanwhile. Instead, they should access the
2577 dirstate via wctx.repo().dirstate.
2577 dirstate via wctx.repo().dirstate.
2578
2578
2579 This list is emptied out after each status run -- extensions should
2579 This list is emptied out after each status run -- extensions should
2580 make sure it adds to this list each time dirstate.status is called.
2580 make sure it adds to this list each time dirstate.status is called.
2581 Extensions should also make sure they don't call this for statuses
2581 Extensions should also make sure they don't call this for statuses
2582 that don't involve the dirstate.
2582 that don't involve the dirstate.
2583 """
2583 """
2584
2584
2585 # The list is located here for uniqueness reasons -- it is actually
2585 # The list is located here for uniqueness reasons -- it is actually
2586 # managed by the workingctx, but that isn't unique per-repo.
2586 # managed by the workingctx, but that isn't unique per-repo.
2587 self._postdsstatus.append(ps)
2587 self._postdsstatus.append(ps)
2588
2588
2589 def postdsstatus(self):
2589 def postdsstatus(self):
2590 """Used by workingctx to get the list of post-dirstate-status hooks."""
2590 """Used by workingctx to get the list of post-dirstate-status hooks."""
2591 return self._postdsstatus
2591 return self._postdsstatus
2592
2592
2593 def clearpostdsstatus(self):
2593 def clearpostdsstatus(self):
2594 """Used by workingctx to clear post-dirstate-status hooks."""
2594 """Used by workingctx to clear post-dirstate-status hooks."""
2595 del self._postdsstatus[:]
2595 del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
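
    # Illustrative usage (a sketch; branch names are bytes in this
    # codebase):
    #
    #   repo.branchheads('default')              # open heads, newest first
    #   repo.branchheads('stable', closed=True)  # include closed heads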

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
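
    # Illustrative behaviour (a sketch, not in the original module): for a
    # linear chain n0 <- n1 <- ... <- n9 and pairs=[(n9, n0)], first
    # parents are walked from n9 towards n0 and the nodes at exponentially
    # spaced offsets i == 1, 2, 4, 8 are collected, yielding
    # [[n8, n7, n5, n1]].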

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance of functions called before pushing
        changesets; each hook is invoked with a pushop carrying repo,
        remote and outgoing attributes.
        """
        return util.hooks()
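
    # Illustrative registration (a sketch; the 'myext' source name and the
    # check are assumptions):
    #
    #   def checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 1000:
    #           pushop.repo.ui.warn('pushing a very large batch\n')
    #
    #   repo.prepushoutgoinghooks.add('myext', checkoutgoing)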

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
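
    # Illustrative usage (a sketch; 'mybook' and newnode are assumptions):
    # pushing a bookmark value through the generic pushkey mechanism, where
    # old='' means the key is being created:
    #
    #   from mercurial.node import hex
    #   repo.pushkey('bookmarks', 'mybook', '', hex(newnode))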

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
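
    # Illustrative usage (a sketch):
    #
    #   repo.listkeys('bookmarks')   # -> {bookmarkname: hexnode, ...}
    #   repo.listkeys('namespaces')  # -> available pushkey namespaces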

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
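
# Illustrative sketch (the paths are assumptions): aftertrans() builds a
# closure that is typically handed to a transaction as its after-close
# callback, renaming journal files to their undo counterparts:
#
#   callback = aftertrans([(repo.svfs, 'journal', 'undo')])
#   callback()   # renames journal -> undo, tolerating a missing journal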

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
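
# Illustrative behaviour (a sketch):
#
#   undoname('.hg/store/journal')      # -> '.hg/store/undo'
#   undoname('.hg/journal.dirstate')   # -> '.hg/undo.dirstate'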

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements
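
# Illustrative sketch of an extension wrapping this function (the
# 'exp-myfeature' requirement is an assumption):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, ui, createopts=None):
#       requirements = orig(ui, createopts=createopts)
#       requirements.add('exp-myfeature')
#       return requirements
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)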

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}
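
# Illustrative sketch of an extension claiming its own creation option
# (the 'myextopt' key is an assumption):
#
#   def _filterknowncreateopts(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop('myextopt', None)  # this extension handles it
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                           _filterknowncreateopts)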

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
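
# Illustrative usage (a sketch; the destination path is an assumption):
#
#   from mercurial import ui as uimod
#   createrepository(uimod.ui.load(), b'/tmp/newrepo')
#
# After this, instance() can open the new repository via
# makelocalrepository().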

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
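
# Illustrative behaviour (a sketch):
#
#   poisonrepository(repo)
#   repo.close()      # still allowed
#   repo.changelog    # raises error.ProgrammingError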